/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/shm.h>
#include "qemu.h"
#include "user/tswap-target.h"
#include "user/page-protection.h"
#include "exec/page-protection.h"
#include "exec/translation-block.h"
#include "user/guest-base.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "tcg/debuginfo.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
#endif
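/*
 * Each embedded vdso blob (see VDSO_HEADER below) is described by this
 * structure: the raw ELF image, a table of relocations to apply once the
 * image is mapped at its final guest address, and the offsets of the
 * sigreturn trampolines within the image for use by signal-frame setup.
 */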
typedef struct {
    const uint8_t *image;
    const uint32_t *relocs;
    unsigned image_size;
    unsigned reloc_count;
    unsigned sigreturn_ofs;
    unsigned rt_sigreturn_ofs;
} VdsoImageInfo;

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE =  0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =     0x0080000,      /* userspace function ptrs point to
                                            descriptors (signal handling) */
    MMAP_PAGE_ZERO =     0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC =  0x0400000,
    ADDR_LIMIT_32BIT =   0x0800000,
    SHORT_INODE =        0x1000000,
    WHOLE_SECONDS =      0x2000000,
    STICKY_TIMEOUTS =    0x4000000,
    ADDR_LIMIT_3GB =     0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                     /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
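
/*
 * FDPIC ELF is used on no-MMU targets: each load segment can be placed
 * independently, and function pointers refer to descriptors rather than
 * code addresses.  The FDPIC_FUNCPTRS personality flag records that
 * signal handlers must be invoked through such descriptors.
 */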
int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
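
/*
 * target_elf_greg_t is the size of one register slot in a core-dump
 * register set, and tswapreg() swaps a value into guest byte order.
 * MIPS n32 is the odd one out: the ABI uses 32-bit longs, but core
 * dumps carry 64-bit register values.
 */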
#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif
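
/*
 * Legacy 16-bit uid targets (e.g. i386, arm, m68k) pass uid_t and gid_t
 * as 16-bit values in the affected syscalls and in core-dump notes.
 */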
#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64
#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
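    /* Slot 15 is orig_rax: the syscall number as saved on kernel entry. */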
    (*regs)[15] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
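/*
 * The x86-64 vsyscall page lives at 0xffffffffff600000, i.e. in kernel
 * space from the guest's point of view, so it is only representable when
 * the host's unsigned long is 64 bits wide (hence the ULONG_MAX guard).
 */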
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We still should be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
#endif
#else

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386
#define ELF_PLATFORM    get_elf_platform()
#define EXSTACK_DEFAULT true

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6) {
        family = 6;
    }
    if (family >= 3) {
        elf_platform[1] = '0' + family;
    }
    return elf_platform;
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 means we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    /* Slot 11 is orig_eax: the syscall number as saved on kernel entry. */
    (*regs)[11] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}

/*
 * i386 is the only target which supplies AT_SYSINFO for the vdso.
 * All others only supply AT_SYSINFO_EHDR.
 */
#define DLINFO_ARCH_ITEMS (vdso_info != NULL)
#define ARCH_DLINFO                                     \
    do {                                                \
        if (vdso_info) {                                \
            NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry);  \
        }                                               \
    } while (0)

#endif /* TARGET_X86_64 */

#define VDSO_HEADER "vdso.c.inc"

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_I386 */

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /* As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel. If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded. */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);
    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
    ARM_HWCAP2_ARM_SB       = 1 << 5,
    ARM_HWCAP2_ARM_SSBS     = 1 << 6,
};

/* The commpage only exists for 32 bit kernels */
#define HI_COMMPAGE (intptr_t)0xffff0f00u
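/*
 * The kernel user helpers sit at the top of this page: memory_barrier at
 * 0xffff0fa0, cmpxchg at 0xffff0fc0, get_tls at 0xffff0fe0, and the
 * helper version word at 0xffff0ffc (see the Linux kernel's ARM
 * kernel-user-helpers documentation).
 */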
static bool init_guest_commpage(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    int host_page_size = qemu_real_host_page_size();
    abi_ptr commpage;
    void *want;
    void *addr;

    /*
     * M-profile allocates a maximum of 2GB of address space, so it can
     * never allocate the commpage.  Skip it.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        return true;
    }

    commpage = HI_COMMPAGE & -host_page_size;
    want = g2h_untagged(commpage);
    addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE |
                (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
                -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /* Set kernel helper versions; rest of page is 0.  */
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | (host_page_size - 1),
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()
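
/*
 * ELF_HWCAP and ELF_HWCAP2 are placed in the auxiliary vector as
 * AT_HWCAP and AT_HWCAP2 when the initial stack is built, where the
 * guest can read them via getauxval(3).
 */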
uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both. The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP_ARM_SWP      )] = "swp",
        [__builtin_ctz(ARM_HWCAP_ARM_HALF     )] = "half",
        [__builtin_ctz(ARM_HWCAP_ARM_THUMB    )] = "thumb",
        [__builtin_ctz(ARM_HWCAP_ARM_26BIT    )] = "26bit",
        [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
        [__builtin_ctz(ARM_HWCAP_ARM_FPA      )] = "fpa",
        [__builtin_ctz(ARM_HWCAP_ARM_VFP      )] = "vfp",
        [__builtin_ctz(ARM_HWCAP_ARM_EDSP     )] = "edsp",
        [__builtin_ctz(ARM_HWCAP_ARM_JAVA     )] = "java",
        [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT   )] = "iwmmxt",
        [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH   )] = "crunch",
        [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE  )] = "thumbee",
        [__builtin_ctz(ARM_HWCAP_ARM_NEON     )] = "neon",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv3    )] = "vfpv3",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
        [__builtin_ctz(ARM_HWCAP_ARM_TLS      )] = "tls",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv4    )] = "vfpv4",
        [__builtin_ctz(ARM_HWCAP_ARM_IDIVA    )] = "idiva",
        [__builtin_ctz(ARM_HWCAP_ARM_IDIVT    )] = "idivt",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
        [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
        [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
        [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };
    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP2_ARM_AES  )] = "aes",
        [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
        [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
        [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
        [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };
    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = cpu_env(thread_cpu);

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#if TARGET_BIG_ENDIAN
#include "elf.h"
#include "vdso-be8.c.inc"
#include "vdso-be32.c.inc"

static const VdsoImageInfo *vdso_image_info(uint32_t elf_flags)
{
    return (EF_ARM_EABI_VERSION(elf_flags) >= EF_ARM_EABI_VER4
            && (elf_flags & EF_ARM_BE8)
            ? &vdso_be8_image_info
            : &vdso_be32_image_info);
}
#define vdso_image_info vdso_image_info
#else
# define VDSO_HEADER "vdso-le.c.inc"
#endif

#else
/* 64 bit ARM definitions */

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
    ARM_HWCAP2_A64_WFXT         = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16        = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16    = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC         = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM        = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1       = 1ULL << 36,
    ARM_HWCAP2_A64_SME2         = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1       = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32   = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32  = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16   = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16   = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS         = 1ULL << 43,
    ARM_HWCAP2_A64_HBC          = 1ULL << 44,
};

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */
    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP_A64_FP      )] = "fp",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMD   )] = "asimd",
        [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_A64_AES     )] = "aes",
        [__builtin_ctz(ARM_HWCAP_A64_PMULL   )] = "pmull",
        [__builtin_ctz(ARM_HWCAP_A64_SHA1    )] = "sha1",
        [__builtin_ctz(ARM_HWCAP_A64_SHA2    )] = "sha2",
        [__builtin_ctz(ARM_HWCAP_A64_CRC32   )] = "crc32",
        [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
        [__builtin_ctz(ARM_HWCAP_A64_FPHP    )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_A64_CPUID   )] = "cpuid",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
        [__builtin_ctz(ARM_HWCAP_A64_JSCVT   )] = "jscvt",
        [__builtin_ctz(ARM_HWCAP_A64_FCMA    )] = "fcma",
        [__builtin_ctz(ARM_HWCAP_A64_LRCPC   )] = "lrcpc",
        [__builtin_ctz(ARM_HWCAP_A64_DCPOP   )] = "dcpop",
        [__builtin_ctz(ARM_HWCAP_A64_SHA3    )] = "sha3",
        [__builtin_ctz(ARM_HWCAP_A64_SM3     )] = "sm3",
        [__builtin_ctz(ARM_HWCAP_A64_SM4     )] = "sm4",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_A64_SHA512  )] = "sha512",
        [__builtin_ctz(ARM_HWCAP_A64_SVE     )] = "sve",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_A64_DIT     )] = "dit",
        [__builtin_ctz(ARM_HWCAP_A64_USCAT   )] = "uscat",
        [__builtin_ctz(ARM_HWCAP_A64_ILRCPC  )] = "ilrcpc",
        [__builtin_ctz(ARM_HWCAP_A64_FLAGM   )] = "flagm",
        [__builtin_ctz(ARM_HWCAP_A64_SSBS    )] = "ssbs",
        [__builtin_ctz(ARM_HWCAP_A64_SB      )] = "sb",
        [__builtin_ctz(ARM_HWCAP_A64_PACA    )] = "paca",
        [__builtin_ctz(ARM_HWCAP_A64_PACG    )] = "pacg",
    };
    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP2_A64_DCPODP       )] = "dcpodp",
        [__builtin_ctz(ARM_HWCAP2_A64_SVE2         )] = "sve2",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEAES       )] = "sveaes",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL     )] = "svepmull",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM   )] = "svebitperm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3      )] = "svesha3",
        [__builtin_ctz(ARM_HWCAP2_A64_SVESM4       )] = "svesm4",
        [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2       )] = "flagm2",
        [__builtin_ctz(ARM_HWCAP2_A64_FRINT        )] = "frint",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM      )] = "svei8mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM     )] = "svef32mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM     )] = "svef64mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16      )] = "svebf16",
        [__builtin_ctz(ARM_HWCAP2_A64_I8MM         )] = "i8mm",
        [__builtin_ctz(ARM_HWCAP2_A64_BF16         )] = "bf16",
        [__builtin_ctz(ARM_HWCAP2_A64_DGH          )] = "dgh",
        [__builtin_ctz(ARM_HWCAP2_A64_RNG          )] = "rng",
        [__builtin_ctz(ARM_HWCAP2_A64_BTI          )] = "bti",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE          )] = "mte",
        [__builtin_ctz(ARM_HWCAP2_A64_ECV          )] = "ecv",
        [__builtin_ctz(ARM_HWCAP2_A64_AFP          )] = "afp",
        [__builtin_ctz(ARM_HWCAP2_A64_RPRES        )] = "rpres",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE3         )] = "mte3",
        [__builtin_ctz(ARM_HWCAP2_A64_SME          )] = "sme",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64   )] = "smei16i64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64   )] = "smef64f64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32    )] = "smei8i32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32   )] = "smef16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32   )] = "smeb16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32   )] = "smef32f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64     )] = "smefa64",
        [__builtin_ctz(ARM_HWCAP2_A64_WFXT         )] = "wfxt",
        [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
        [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
        [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
        [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };
    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE_ID

#if TARGET_BIG_ENDIAN
# define VDSO_HEADER "vdso-be.c.inc"
#else
# define VDSO_HEADER "vdso-le.c.inc"
#endif

#endif /* not TARGET_AARCH64 */

#endif /* TARGET_ARM */

#ifdef TARGET_SPARC

#ifndef TARGET_SPARC64
# define ELF_CLASS  ELFCLASS32
# define ELF_ARCH   EM_SPARC
#elif defined(TARGET_ABI32)
# define ELF_CLASS  ELFCLASS32
# define elf_check_arch(x) ((x) == EM_SPARC32PLUS || (x) == EM_SPARC)
#else
# define ELF_CLASS  ELFCLASS64
# define ELF_ARCH   EM_SPARCV9
#endif

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    /* There are not many sparc32 hwcap bits -- we have all of them. */
    uint32_t r = HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
                 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV;

#ifdef TARGET_SPARC64
    CPUSPARCState *env = cpu_env(thread_cpu);
    uint32_t features = env->def.features;

    r |= HWCAP_SPARC_V9 | HWCAP_SPARC_V8PLUS;
    /* 32x32 multiply and divide are efficient. */
    r |= HWCAP_SPARC_MUL32 | HWCAP_SPARC_DIV32;
    /* We don't have an internal feature bit for this. */
    r |= HWCAP_SPARC_POPC;
    r |= features & CPU_FEATURE_FSMULD ? HWCAP_SPARC_FSMULD : 0;
    r |= features & CPU_FEATURE_VIS1 ? HWCAP_SPARC_VIS : 0;
    r |= features & CPU_FEATURE_VIS2 ? HWCAP_SPARC_VIS2 : 0;
    r |= features & CPU_FEATURE_FMAF ? HWCAP_SPARC_FMAF : 0;
    r |= features & CPU_FEATURE_VIS3 ? HWCAP_SPARC_VIS3 : 0;
    r |= features & CPU_FEATURE_IMA ? HWCAP_SPARC_IMA : 0;
#endif

    return r;
}
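
/*
 * The initial %sp must leave room for the 16-word register window save
 * area that lives below the stack pointer; on sparc64 the ABI also
 * stores %sp biased by the stack bias (2047), which is why
 * TARGET_STACK_BIAS is subtracted here (it is 0 on 32-bit sparc).
 */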
  912. static inline void init_thread(struct target_pt_regs *regs,
  913. struct image_info *infop)
  914. {
  915. /* Note that target_cpu_copy_regs does not read psr/tstate. */
  916. regs->pc = infop->entry;
  917. regs->npc = regs->pc + 4;
  918. regs->y = 0;
  919. regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
  920. - TARGET_STACK_BIAS);
  921. }
  922. #endif /* TARGET_SPARC */
#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature)                                    \
    do {                                                                \
        if ((cpu->env.insns_flags2 & flags) == flags) {                 \
            features |= feature;                                        \
        }                                                               \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                 QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                     \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                             \
    do {                                                        \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);              \
        /*                                                      \
         * Handle glibc compatibility: these magic entries must \
         * be at the lowest addresses in the final auxv.        \
         */                                                     \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                         \
    } while (0)
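/*
 * init_thread below must distinguish the two 64-bit ABIs: for ELFv1
 * (ABI version < 2) e_entry points at a function descriptor, so the
 * real entry point and the TOC pointer (r2) are loaded from it; for
 * ELFv2 the entry is the global entry point itself and r12 must hold
 * that address on entry.
 */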
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#ifndef TARGET_PPC64
# define VDSO_HEADER  "vdso-32.c.inc"
#elif TARGET_BIG_ENDIAN
# define VDSO_HEADER  "vdso-64.c.inc"
#else
# define VDSO_HEADER  "vdso-64le.c.inc"
#endif

#endif /* TARGET_PPC */

#ifdef TARGET_LOONGARCH64

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

#define VDSO_HEADER "vdso.c.inc"

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set crmd PG, DA = 1, 0, i.e. paged address translation enabled. */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        hwcaps |= HWCAP_LOONGARCH_LSX;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        hwcaps |= HWCAP_LOONGARCH_LASX;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64-bit ISAs go first. */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32-bit ISAs. */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback. */
    return "mips";
}
#undef MATCH_PLATFORM_INSN
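/*
 * In init_thread below, "2 << CP0St_KSU" sets the KSU field of
 * CP0.Status to 0b10, i.e. user mode.
 */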
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
    HWCAP_MIPS_CRC32        = (1 << 2),
    HWCAP_MIPS_MIPS16       = (1 << 3),
    HWCAP_MIPS_MDMX         = (1 << 4),
    HWCAP_MIPS_MIPS3D       = (1 << 5),
    HWCAP_MIPS_SMARTMIPS    = (1 << 6),
    HWCAP_MIPS_DSP          = (1 << 7),
    HWCAP_MIPS_DSP2         = (1 << 8),
    HWCAP_MIPS_DSP3         = (1 << 9),
    HWCAP_MIPS_MIPS16E2     = (1 << 10),
    HWCAP_LOONGSON_MMI      = (1 << 11),
    HWCAP_LOONGSON_EXT      = (1 << 12),
    HWCAP_LOONGSON_EXT2     = (1 << 13),
    HWCAP_LOONGSON_CPUCFG   = (1 << 14),
};

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE_INSN(_flag, _hwcap)                                 \
    do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap)                        \
    do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap)        \
    do {                                                                \
        if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) {  \
            hwcaps |= _hwcap;                                           \
        }                                                               \
    } while (0)
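/*
 * Note: CP0.Config0.AR == 2 identifies a release 6 ISA, which is what
 * the HWCAP_MIPS_R6 test below checks for.
 */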
static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
                        2, HWCAP_MIPS_R6);
    GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
    GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
    GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);

    return hwcaps;
}

#undef GET_FEATURE_REG_EQU
#undef GET_FEATURE_REG_SET
#undef GET_FEATURE_INSN

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/microblaze/include/uapi/asm/ptrace.h (struct pt_regs). */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    (*regs)[pos++] = tswapreg(env->pc);
    (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->ear);
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->esr);
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_OPENRISC

#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_M68K

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifdef TARGET_ALPHA

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_S390

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE(_feat, _hwcap) \
    do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    /*
     * Let's assume we always have esan3 and zarch.
     * 31-bit processes can use 64-bit registers (high gprs).
     */
    uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;

    GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
    GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
    GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
    GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
    if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
        s390_has_feat(S390_FEAT_ETF3_ENH)) {
        hwcap |= HWCAP_S390_ETF3EH;
    }
    GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
    GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT);
    GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2);

    return hwcap;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [HWCAP_S390_NR_ESAN3]     = "esan3",
        [HWCAP_S390_NR_ZARCH]     = "zarch",
        [HWCAP_S390_NR_STFLE]     = "stfle",
        [HWCAP_S390_NR_MSA]       = "msa",
        [HWCAP_S390_NR_LDISP]     = "ldisp",
        [HWCAP_S390_NR_EIMM]      = "eimm",
        [HWCAP_S390_NR_DFP]       = "dfp",
        [HWCAP_S390_NR_HPAGE]     = "edat",
        [HWCAP_S390_NR_ETF3EH]    = "etf3eh",
        [HWCAP_S390_NR_HIGH_GPRS] = "highgprs",
        [HWCAP_S390_NR_TE]        = "te",
        [HWCAP_S390_NR_VXRS]      = "vx",
        [HWCAP_S390_NR_VXRS_BCD]  = "vxd",
        [HWCAP_S390_NR_VXRS_EXT]  = "vxe",
        [HWCAP_S390_NR_GS]        = "gs",
        [HWCAP_S390_NR_VXRS_EXT2] = "vxe2",
        [HWCAP_S390_NR_VXRS_PDE]  = "vxp",
        [HWCAP_S390_NR_SORT]      = "sort",
        [HWCAP_S390_NR_DFLT]      = "dflt",
        [HWCAP_S390_NR_NNPA]      = "nnpa",
        [HWCAP_S390_NR_PCI_MIO]   = "pcimio",
        [HWCAP_S390_NR_SIE]       = "sie",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}
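/*
 * The PSW mask below starts the process with DAT and the I/O, external
 * and machine-check interrupts enabled, in problem (user) state;
 * PSW_MASK_64 and PSW_MASK_32 together select the 64-bit extended
 * addressing mode.
 */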
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psw.addr = infop->entry;
    regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
                     PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \
                     PSW_MASK_32;
    regs->gprs[15] = infop->start_stack;
}

/* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs).  */
#define ELF_NREG 27
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_REG_PSWM = 0,
    TARGET_REG_PSWA = 1,
    TARGET_REG_GPRS = 2,
    TARGET_REG_ARS = 18,
    TARGET_REG_ORIG_R2 = 26,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUS390XState *env)
{
    int i;
    uint32_t *aregs;

    (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask);
    (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr);
    for (i = 0; i < 16; i++) {
        (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]);
    }
    aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]);
    for (i = 0; i < 16; i++) {
        aregs[i] = tswap32(env->aregs[i]);
    }
    (*regs)[TARGET_REG_ORIG_R2] = 0;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#define VDSO_HEADER "vdso.c.inc"

#endif /* TARGET_S390X */

#ifdef TARGET_RISCV

#define ELF_ARCH  EM_RISCV

#ifdef TARGET_RISCV32
#define ELF_CLASS ELFCLASS32
#define VDSO_HEADER "vdso-32.c.inc"
#else
#define ELF_CLASS ELFCLASS64
#define VDSO_HEADER "vdso-64.c.inc"
#endif

#define ELF_HWCAP get_elf_hwcap()
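/*
 * As in the kernel, the RISC-V hwcap word is a bitmask of the
 * single-letter ISA extensions, with bit (X - 'A') standing for
 * extension X; only the base extensions are advertised here.
 */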
static uint32_t get_elf_hwcap(void)
{
#define MISA_BIT(EXT) (1 << (EXT - 'A'))
    RISCVCPU *cpu = RISCV_CPU(thread_cpu);
    uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A')
                    | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C')
                    | MISA_BIT('V');

    return cpu->env.misa_ext & mask;
#undef MISA_BIT
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_RISCV */

#ifdef TARGET_HPPA

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_PARISC
#define ELF_PLATFORM    "PARISC"
#define STACK_GROWS_DOWN 0
#define STACK_ALIGNMENT  64

#define VDSO_HEADER "vdso.c.inc"
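/*
 * In init_thread below, iaoq is the instruction address offset queue;
 * its low two bits carry the privilege level, so the entry address is
 * or'ed with PRIV_USER to start execution in user mode.
 */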
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->iaoq[0] = infop->entry | PRIV_USER;
    regs->iaoq[1] = regs->iaoq[0] + 4;
    regs->gr[23] = 0;
    regs->gr[24] = infop->argv;
    regs->gr[25] = infop->argc;
    /* The top-of-stack contains a linkage buffer.  */
    regs->gr[30] = infop->start_stack + 64;
    regs->gr[31] = infop->entry;
}

#define LO_COMMPAGE  0

static bool init_guest_commpage(void)
{
    /* If reserved_va, then we have already mapped 0 page on the host. */
    if (!reserved_va) {
        void *want, *addr;

        want = g2h_untagged(LO_COMMPAGE);
        addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE,
                    MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0);
        if (addr == MAP_FAILED) {
            perror("Allocating guest commpage");
            exit(EXIT_FAILURE);
        }
        if (addr != want) {
            return false;
        }
    }

    /*
     * On Linux, page zero is normally marked execute only + gateway.
     * Normal read or write is supposed to fail (thus PROT_NONE above),
     * but specific offsets have kernel code mapped to raise permissions
     * and implement syscalls.  Here, simply mark the page executable.
     * Special case the entry points during translation (see do_page_zero).
     */
    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}

#endif /* TARGET_HPPA */

#ifdef TARGET_XTENSA

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_XTENSA
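/*
 * In init_thread below, windowbase = 0 and windowstart = 1 make
 * register window 0 the only live window; a1 (areg[1]) is the stack
 * pointer per the Xtensa ABI.
 */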
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->windowbase = 0;
    regs->windowstart = 1;
    regs->areg[1] = infop->start_stack;
    regs->pc = infop->entry;
    if (info_is_fdpic(infop)) {
        regs->areg[4] = infop->loadmap_addr;
        regs->areg[5] = infop->interpreter_loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            regs->areg[6] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->areg[6] = infop->pt_dynamic_addr;
        }
    }
}

/* See linux kernel: arch/xtensa/include/asm/elf.h.  */
#define ELF_NREG 128
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_REG_PC,
    TARGET_REG_PS,
    TARGET_REG_LBEG,
    TARGET_REG_LEND,
    TARGET_REG_LCOUNT,
    TARGET_REG_SAR,
    TARGET_REG_WINDOWSTART,
    TARGET_REG_WINDOWBASE,
    TARGET_REG_THREADPTR,
    TARGET_REG_AR0 = 64,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUXtensaState *env)
{
    unsigned i;

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
    (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
    (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
    (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
    (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
    (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
    (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
    (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
    xtensa_sync_phys_from_window((CPUXtensaState *)env);
    for (i = 0; i < env->config->nareg; ++i) {
        (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
    }
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_XTENSA */

#ifdef TARGET_HEXAGON

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_HEXAGON

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#endif /* TARGET_HEXAGON */

#ifndef ELF_BASE_PLATFORM
#define ELF_BASE_PLATFORM (NULL)
#endif

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_MACHINE
#define ELF_MACHINE ELF_ARCH
#endif

#ifndef elf_check_arch
#define elf_check_arch(x) ((x) == ELF_ARCH)
#endif

#ifndef elf_check_abi
#define elf_check_abi(x) (1)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifndef STACK_GROWS_DOWN
#define STACK_GROWS_DOWN 1
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT 16
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#ifndef EXSTACK_DEFAULT
#define EXSTACK_DEFAULT false
#endif

#include "elf.h"

/* We must delay the following stanzas until after "elf.h". */
#if defined(TARGET_AARCH64)

static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
                                    const uint32_t *data,
                                    struct image_info *info,
                                    Error **errp)
{
    if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
        if (pr_datasz != sizeof(uint32_t)) {
            error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND");
            return false;
        }
        /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */
        info->note_flags = *data;
    }
    return true;
}
#define ARCH_USE_GNU_PROPERTY 1

#else

static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
                                    const uint32_t *data,
                                    struct image_info *info,
                                    Error **errp)
{
    g_assert_not_reached();
}
#define ARCH_USE_GNU_PROPERTY 0

#endif

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

#define DLINFO_ITEMS 16

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}
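/*
 * The bswap_* helpers below byte-swap ELF structures in place between
 * host and guest endianness; when host and target endianness match
 * they compile to empty inline stubs (see the #else branch).
 */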
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;
    for (i = 0; i < phnum; ++i, ++phdr) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}

static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;
    for (i = 0; i < shnum; ++i, ++shdr) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
    bswap16s(&abiflags->version);
    bswap32s(&abiflags->ases);
    bswap32s(&abiflags->isa_ext);
    bswap32s(&abiflags->flags1);
    bswap32s(&abiflags->flags2);
}
#endif
#else
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#ifdef TARGET_MIPS
static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#endif
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
                         abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && elf_check_abi(ehdr->e_flags)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
                                  abi_ulong p, abi_ulong stack_limit)
{
    char *tmp;
    int len, i;
    abi_ulong top = p;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    if (STACK_GROWS_DOWN) {
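        /*
         * For a downward-growing stack, copy from the last argument
         * backwards, one page at a time: each string is walked from
         * its tail so that the scratch page fills from the end, and a
         * full page is flushed to the target with memcpy_to_target()
         * before moving on.
         */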
        int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;

        for (i = argc - 1; i >= 0; --i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            tmp += len;

            if (len > (p - stack_limit)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;

                memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);

                if (offset == 0) {
                    memcpy_to_target(p, scratch, top - p);
                    top = p;
                    offset = TARGET_PAGE_SIZE;
                }
            }
        }
        if (p != top) {
            memcpy_to_target(p, scratch + offset, top - p);
        }
    } else {
        int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);

        for (i = 0; i < argc; ++i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            if (len > (stack_limit - p)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > remaining) ? remaining : len;

                memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);

                tmp += bytes_to_copy;
                remaining -= bytes_to_copy;
                p += bytes_to_copy;
                len -= bytes_to_copy;

                if (remaining == 0) {
                    memcpy_to_target(top, scratch, p - top);
                    top = p;
                    remaining = TARGET_PAGE_SIZE;
                }
            }
        }
        if (p != top) {
            memcpy_to_target(top, scratch, p - top);
        }
    }

    return p;
}

/* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of
 * argument/environment space. Newer kernels (>2.6.33) allow more,
 * dependent on stack size, but guarantee at least 32 pages for
 * backwards compatibility.
 */
#define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)

static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong size, error, guard;
    int prot;

    size = guest_stack_size;
    if (size < STACK_LOWER_LIMIT) {
        size = STACK_LOWER_LIMIT;
    }

    if (STACK_GROWS_DOWN) {
        guard = TARGET_PAGE_SIZE;
        if (guard < qemu_real_host_page_size()) {
            guard = qemu_real_host_page_size();
        }
    } else {
        /* no guard page for hppa target where stack grows upwards. */
        guard = 0;
    }

    prot = PROT_READ | PROT_WRITE;
    if (info->exec_stack) {
        prot |= PROT_EXEC;
    }
    error = target_mmap(0, size + guard, prot,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (error == -1) {
        perror("mmap stack");
        exit(-1);
    }

    /* We reserve one extra page at the top of the stack as guard.  */
    if (STACK_GROWS_DOWN) {
        target_mprotect(error, guard, PROT_NONE);
        info->stack_limit = error + guard;
        return info->stack_limit + size - sizeof(void *);
    } else {
        info->stack_limit = error + size;
        return error;
    }
}

/**
 * zero_bss:
 *
 * Map and zero the bss.  We need to explicitly zero any fractional pages
 * after the data section (i.e. bss).  Return false on mapping failure.
 */
static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss,
                     int prot, Error **errp)
{
    abi_ulong align_bss;

    /* We only expect writable bss; the code segment shouldn't need this. */
    if (!(prot & PROT_WRITE)) {
        error_setg(errp, "PT_LOAD with non-writable bss");
        return false;
    }

    align_bss = TARGET_PAGE_ALIGN(start_bss);
    end_bss = TARGET_PAGE_ALIGN(end_bss);

    if (start_bss < align_bss) {
        int flags = page_get_flags(start_bss);

        if (!(flags & PAGE_RWX)) {
            /*
             * The whole address space of the executable was reserved
             * at the start, therefore all pages will be VALID.
             * But assuming there are no PROT_NONE PT_LOAD segments,
             * a PROT_NONE page contains no data, only bss, and we can
             * simply extend the new anon mapping back to the start
             * of the page of bss.
             */
            align_bss -= TARGET_PAGE_SIZE;
        } else {
            /*
             * The start of the bss shares a page with something.
             * The only thing that we expect is the data section,
             * which would already be marked writable.
             * Overlapping the RX code segment seems malformed.
             */
            if (!(flags & PAGE_WRITE)) {
                error_setg(errp, "PT_LOAD with bss overlapping "
                           "non-writable page");
                return false;
            }

            /* The page is already mapped and writable. */
            memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
        }
    }

    if (align_bss < end_bss &&
        target_mmap(align_bss, end_bss - align_bss, prot,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        error_setg_errno(errp, errno, "Error mapping bss");
        return false;
    }
    return true;
}

#if defined(TARGET_ARM)
static int elf_is_fdpic(struct elfhdr *exec)
{
    return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
}
#elif defined(TARGET_XTENSA)
static int elf_is_fdpic(struct elfhdr *exec)
{
    return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
}
#else
/* Default implementation, always false.  */
static int elf_is_fdpic(struct elfhdr *exec)
{
    return 0;
}
#endif

static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
{
    uint16_t n;
    struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;

    /* elf32_fdpic_loadseg */
    n = info->nsegs;
    while (n--) {
        sp -= 12;
        put_user_u32(loadsegs[n].addr, sp+0);
        put_user_u32(loadsegs[n].p_vaddr, sp+4);
        put_user_u32(loadsegs[n].p_memsz, sp+8);
    }

    /* elf32_fdpic_loadmap */
    sp -= 4;
    put_user_u16(0, sp+0);                  /* version */
    put_user_u16(info->nsegs, sp+2);        /* nsegs */

    info->personality = PER_LINUX_FDPIC;
    info->loadmap_addr = sp;

    return sp;
}

static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   struct image_info *info,
                                   struct image_info *interp_info,
                                   struct image_info *vdso_info)
{
    abi_ulong sp;
    abi_ulong u_argc, u_argv, u_envp, u_auxv;
    int size;
    int i;
    abi_ulong u_rand_bytes;
    uint8_t k_rand_bytes[16];
    abi_ulong u_platform, u_base_platform;
    const char *k_platform, *k_base_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;

    /* Needs to be before we load the env/argc/... */
    if (elf_is_fdpic(exec)) {
        /* Need 4 byte alignment for these structs */
        sp &= ~3;
        sp = loader_build_fdpic_loadmap(info, sp);
        info->other_info = interp_info;
        if (interp_info) {
            interp_info->other_info = info;
            sp = loader_build_fdpic_loadmap(interp_info, sp);
            info->interpreter_loadmap_addr = interp_info->loadmap_addr;
            info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
        } else {
            info->interpreter_loadmap_addr = 0;
            info->interpreter_pt_dynamic_addr = 0;
        }
    }

    u_base_platform = 0;
    k_base_platform = ELF_BASE_PLATFORM;
    if (k_base_platform) {
        size_t len = strlen(k_base_platform) + 1;
        if (STACK_GROWS_DOWN) {
            sp -= (len + n - 1) & ~(n - 1);
            u_base_platform = sp;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(sp, k_base_platform, len);
        } else {
            memcpy_to_target(sp, k_base_platform, len);
            u_base_platform = sp;
            sp += len + 1;
        }
    }

    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        if (STACK_GROWS_DOWN) {
            sp -= (len + n - 1) & ~(n - 1);
            u_platform = sp;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(sp, k_platform, len);
        } else {
            memcpy_to_target(sp, k_platform, len);
            u_platform = sp;
            sp += len + 1;
        }
    }

    /* Provide 16 byte alignment for the PRNG, and basic alignment for
     * the argv and envp pointers.
     */
    if (STACK_GROWS_DOWN) {
        sp = QEMU_ALIGN_DOWN(sp, 16);
    } else {
        sp = QEMU_ALIGN_UP(sp, 16);
    }

    /*
     * Generate 16 random bytes for userspace PRNG seeding.
     */
    qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes));
    if (STACK_GROWS_DOWN) {
        sp -= 16;
        u_rand_bytes = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_rand_bytes, 16);
    } else {
        memcpy_to_target(sp, k_rand_bytes, 16);
        u_rand_bytes = sp;
        sp += 16;
    }
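    /*
     * Account for the auxv entries first: each is an (id, value) pair
     * of target words, DLINFO_ITEMS of them plus the AT_NULL
     * terminator, then any optional entries added below.
     */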
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_base_platform) {
        size += 2;
    }
    if (k_platform) {
        size += 2;
    }
    if (vdso_info) {
        size += 2;
    }
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
#ifdef ELF_HWCAP2
    size += 2;
#endif
    info->auxv_len = size * n;

    size += envc + argc + 2;
    size += 1;  /* argc itself */
    size *= n;

    /* Allocate space and finalize stack alignment for entry now.  */
    if (STACK_GROWS_DOWN) {
        u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
        sp = u_argc;
    } else {
        u_argc = sp;
        sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
    }

    u_argv = u_argc + n;
    u_envp = u_argv + (argc + 1) * n;
    u_auxv = u_envp + (envc + 1) * n;
    info->saved_auxv = u_auxv;
    info->argc = argc;
    info->envc = envc;
    info->argv = u_argv;
    info->envp = u_envp;

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        put_user_ual(id, u_auxv);  u_auxv += n; \
        put_user_ual(val, u_auxv); u_auxv += n; \
    } while(0)

#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come first so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
    /* There must be exactly DLINFO_ITEMS entries here, or the assert
     * on info->auxv_len will trigger.
     */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, info->entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
    NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
    NEW_AUX_ENT(AT_EXECFN, info->file_string);

#ifdef ELF_HWCAP2
    NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
#endif

    if (u_base_platform) {
        NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
    }
    if (u_platform) {
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
    }
    if (vdso_info) {
        NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr);
    }
    NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT

    /* Check that our initial calculation of the auxv length matches how much
     * we actually put into it.
     */
    assert(info->auxv_len == u_auxv - info->saved_auxv);

    put_user_ual(argc, u_argc);

    p = info->arg_strings;
    for (i = 0; i < argc; ++i) {
        put_user_ual(p, u_argv);
        u_argv += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_argv);

    p = info->env_strings;
    for (i = 0; i < envc; ++i) {
        put_user_ual(p, u_envp);
        u_envp += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_envp);

    return sp;
}

#if defined(HI_COMMPAGE)
#define LO_COMMPAGE -1
#elif defined(LO_COMMPAGE)
#define HI_COMMPAGE 0
#else
#define HI_COMMPAGE 0
#define LO_COMMPAGE -1
#ifndef INIT_GUEST_COMMPAGE
#define init_guest_commpage() true
#endif
#endif
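/*
 * HI_COMMPAGE and LO_COMMPAGE name the guest address of a kernel
 * commpage that must stay reachable when choosing guest_base (e.g.
 * the hppa gateway page at address 0 above); 0 and -1 respectively
 * mean "none".
 */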
  2311. /**
  2312. * pgb_try_mmap:
  2313. * @addr: host start address
  2314. * @addr_last: host last address
  2315. * @keep: do not unmap the probe region
  2316. *
  2317. * Return 1 if [@addr, @addr_last] is not mapped in the host,
  2318. * return 0 if it is not available to map, and -1 on mmap error.
  2319. * If @keep, the region is left mapped on success, otherwise unmapped.
  2320. */
  2321. static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep)
  2322. {
  2323. size_t size = addr_last - addr + 1;
  2324. void *p = mmap((void *)addr, size, PROT_NONE,
  2325. MAP_ANONYMOUS | MAP_PRIVATE |
  2326. MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);
  2327. int ret;
  2328. if (p == MAP_FAILED) {
  2329. return errno == EEXIST ? 0 : -1;
  2330. }
  2331. ret = p == (void *)addr;
  2332. if (!keep || !ret) {
  2333. munmap(p, size);
  2334. }
  2335. return ret;
  2336. }
  2337. /**
  2338. * pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t size, uintptr_t brk)
  2339. * @addr: host address
  2340. * @addr_last: host last address
  2341. * @brk: host brk
  2342. *
  2343. * Like pgb_try_mmap, but additionally reserve some memory following brk.
  2344. */
  2345. static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last,
  2346. uintptr_t brk, bool keep)
  2347. {
  2348. uintptr_t brk_last = brk + 16 * MiB - 1;
  2349. /* Do not map anything close to the host brk. */
  2350. if (addr <= brk_last && brk <= addr_last) {
  2351. return 0;
  2352. }
  2353. return pgb_try_mmap(addr, addr_last, keep);
  2354. }
  2355. /**
  2356. * pgb_try_mmap_set:
  2357. * @ga: set of guest addrs
  2358. * @base: guest_base
  2359. * @brk: host brk
  2360. *
  2361. * Return true if all @ga can be mapped by the host at @base.
  2362. * On success, retain the mapping at index 0 for reserved_va.
  2363. */
  2364. typedef struct PGBAddrs {
  2365. uintptr_t bounds[3][2]; /* start/last pairs */
  2366. int nbounds;
  2367. } PGBAddrs;
  2368. static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk)
  2369. {
  2370. for (int i = ga->nbounds - 1; i >= 0; --i) {
  2371. if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base,
  2372. ga->bounds[i][1] + base,
  2373. brk, i == 0 && reserved_va) <= 0) {
  2374. return false;
  2375. }
  2376. }
  2377. return true;
  2378. }
  2379. /**
  2380. * pgb_addr_set:
  2381. * @ga: output set of guest addrs
  2382. * @guest_loaddr: guest image low address
  2383. * @guest_loaddr: guest image high address
  2384. * @identity: create for identity mapping
  2385. *
  2386. * Fill in @ga with the image, COMMPAGE and NULL page.
  2387. */
  2388. static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
  2389. abi_ulong guest_hiaddr, bool try_identity)
  2390. {
  2391. int n;
  2392. /*
  2393. * With a low commpage, or a guest mapped very low,
  2394. * we may not be able to use the identity map.
  2395. */
  2396. if (try_identity) {
  2397. if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) {
  2398. return false;
  2399. }
  2400. if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) {
  2401. return false;
  2402. }
  2403. }
  2404. memset(ga, 0, sizeof(*ga));
  2405. n = 0;
  2406. if (reserved_va) {
  2407. ga->bounds[n][0] = try_identity ? mmap_min_addr : 0;
  2408. ga->bounds[n][1] = reserved_va;
  2409. n++;
  2410. /* LO_COMMPAGE and NULL handled by reserving from 0. */
  2411. } else {
  2412. /* Add any LO_COMMPAGE or NULL page. */
  2413. if (LO_COMMPAGE != -1) {
  2414. ga->bounds[n][0] = 0;
  2415. ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1;
  2416. n++;
  2417. } else if (!try_identity) {
  2418. ga->bounds[n][0] = 0;
  2419. ga->bounds[n][1] = TARGET_PAGE_SIZE - 1;
  2420. n++;
  2421. }
  2422. /* Add the guest image for ET_EXEC. */
  2423. if (guest_loaddr) {
  2424. ga->bounds[n][0] = guest_loaddr;
  2425. ga->bounds[n][1] = guest_hiaddr;
  2426. n++;
  2427. }
  2428. }
  2429. /*
  2430. * Temporarily disable
  2431. * "comparison is always false due to limited range of data type"
  2432. * due to comparison between unsigned and (possible) 0.
  2433. */
  2434. #pragma GCC diagnostic push
  2435. #pragma GCC diagnostic ignored "-Wtype-limits"
  2436. /* Add any HI_COMMPAGE not covered by reserved_va. */
  2437. if (reserved_va < HI_COMMPAGE) {
  2438. ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask();
  2439. ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1;
  2440. n++;
  2441. }
  2442. #pragma GCC diagnostic pop
  2443. ga->nbounds = n;
  2444. return true;
  2445. }
  2446. static void pgb_fail_in_use(const char *image_name)
  2447. {
  2448. error_report("%s: requires virtual address space that is in use "
  2449. "(omit the -B option or choose a different value)",
  2450. image_name);
  2451. exit(EXIT_FAILURE);
  2452. }
  2453. static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr,
  2454. uintptr_t guest_hiaddr, uintptr_t align)
  2455. {
  2456. PGBAddrs ga;
  2457. uintptr_t brk = (uintptr_t)sbrk(0);
  2458. if (!QEMU_IS_ALIGNED(guest_base, align)) {
  2459. fprintf(stderr, "Requested guest base %p does not satisfy "
  2460. "host minimum alignment (0x%" PRIxPTR ")\n",
  2461. (void *)guest_base, align);
  2462. exit(EXIT_FAILURE);
  2463. }
  2464. if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base)
  2465. || !pgb_try_mmap_set(&ga, guest_base, brk)) {
  2466. pgb_fail_in_use(image_name);
  2467. }
  2468. }
  2469. /**
  2470. * pgb_find_fallback:
  2471. *
  2472. * This is a fallback method for finding holes in the host address space
  2473. * if we don't have the benefit of being able to access /proc/self/map.
  2474. * It can potentially take a very long time as we can only dumbly iterate
  2475. * up the host address space seeing if the allocation would work.
  2476. */
  2477. static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align,
  2478. uintptr_t brk)
  2479. {
  2480. /* TODO: come up with a better estimate of how much to skip. */
  2481. uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB;
  2482. for (uintptr_t base = skip; ; base += skip) {
  2483. base = ROUND_UP(base, align);
  2484. if (pgb_try_mmap_set(ga, base, brk)) {
  2485. return base;
  2486. }
  2487. if (base >= -skip) {
  2488. return -1;
  2489. }
  2490. }
  2491. }
  2492. static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base,
  2493. IntervalTreeRoot *root)
  2494. {
  2495. for (int i = ga->nbounds - 1; i >= 0; --i) {
  2496. uintptr_t s = base + ga->bounds[i][0];
  2497. uintptr_t l = base + ga->bounds[i][1];
  2498. IntervalTreeNode *n;
  2499. if (l < s) {
  2500. /* Wraparound. Skip to advance S to mmap_min_addr. */
  2501. return mmap_min_addr - s;
  2502. }
  2503. n = interval_tree_iter_first(root, s, l);
  2504. if (n != NULL) {
  2505. /* Conflict. Skip to advance S to LAST + 1. */
  2506. return n->last - s + 1;
  2507. }
  2508. }
  2509. return 0; /* success */
  2510. }
  2511. static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root,
  2512. uintptr_t align, uintptr_t brk)
  2513. {
  2514. uintptr_t last = sizeof(uintptr_t) == 4 ? MiB : GiB;
  2515. uintptr_t base, skip;
  2516. while (true) {
  2517. base = ROUND_UP(last, align);
  2518. if (base < last) {
  2519. return -1;
  2520. }
  2521. skip = pgb_try_itree(ga, base, root);
  2522. if (skip == 0) {
  2523. break;
  2524. }
  2525. last = base + skip;
  2526. if (last < base) {
  2527. return -1;
  2528. }
  2529. }
  2530. /*
  2531. * We've chosen 'base' based on holes in the interval tree,
  2532. * but we don't yet know if it is a valid host address.
  2533. * Because it is the first matching hole, if the host addresses
  2534. * are invalid we know there are no further matches.
  2535. */
  2536. return pgb_try_mmap_set(ga, base, brk) ? base : -1;
  2537. }
  2538. static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr,
  2539. uintptr_t guest_hiaddr, uintptr_t align)
  2540. {
  2541. IntervalTreeRoot *root;
  2542. uintptr_t brk, ret;
  2543. PGBAddrs ga;
  2544. /* Try the identity map first. */
  2545. if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) {
  2546. brk = (uintptr_t)sbrk(0);
  2547. if (pgb_try_mmap_set(&ga, 0, brk)) {
  2548. guest_base = 0;
  2549. return;
  2550. }
  2551. }
    /*
     * Rebuild the address set for the non-identity map.
     * This differs in the mapping of the guest NULL page.
     */
  2556. pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false);
  2557. root = read_self_maps();
  2558. /* Read brk after we've read the maps, which will malloc. */
  2559. brk = (uintptr_t)sbrk(0);
  2560. if (!root) {
  2561. ret = pgb_find_fallback(&ga, align, brk);
  2562. } else {
  2563. /*
  2564. * Reserve the area close to the host brk.
  2565. * This will be freed with the rest of the tree.
  2566. */
  2567. IntervalTreeNode *b = g_new0(IntervalTreeNode, 1);
  2568. b->start = brk;
  2569. b->last = brk + 16 * MiB - 1;
  2570. interval_tree_insert(b, root);
  2571. ret = pgb_find_itree(&ga, root, align, brk);
  2572. free_self_maps(root);
  2573. }
  2574. if (ret == -1) {
  2575. int w = TARGET_LONG_BITS / 4;
  2576. error_report("%s: Unable to find a guest_base to satisfy all "
  2577. "guest address mapping requirements", image_name);
  2578. for (int i = 0; i < ga.nbounds; ++i) {
  2579. error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n",
  2580. w, (uint64_t)ga.bounds[i][0],
  2581. w, (uint64_t)ga.bounds[i][1]);
  2582. }
  2583. exit(EXIT_FAILURE);
  2584. }
  2585. guest_base = ret;
  2586. }
  2587. void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
  2588. abi_ulong guest_hiaddr)
  2589. {
  2590. /* In order to use host shmat, we must be able to honor SHMLBA. */
  2591. uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE);
  2592. /* Sanity check the guest binary. */
  2593. if (reserved_va) {
  2594. if (guest_hiaddr > reserved_va) {
  2595. error_report("%s: requires more than reserved virtual "
  2596. "address space (0x%" PRIx64 " > 0x%lx)",
  2597. image_name, (uint64_t)guest_hiaddr, reserved_va);
  2598. exit(EXIT_FAILURE);
  2599. }
  2600. } else {
  2601. if (guest_hiaddr != (uintptr_t)guest_hiaddr) {
  2602. error_report("%s: requires more virtual address space "
  2603. "than the host can provide (0x%" PRIx64 ")",
  2604. image_name, (uint64_t)guest_hiaddr + 1);
  2605. exit(EXIT_FAILURE);
  2606. }
  2607. }
  2608. if (have_guest_base) {
  2609. pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align);
  2610. } else {
  2611. pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align);
  2612. }
  2613. /* Reserve and initialize the commpage. */
  2614. if (!init_guest_commpage()) {
  2615. /* We have already probed for the commpage being free. */
  2616. g_assert_not_reached();
  2617. }
  2618. assert(QEMU_IS_ALIGNED(guest_base, align));
  2619. qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space "
  2620. "@ 0x%" PRIx64 "\n", (uint64_t)guest_base);
  2621. }
  2622. enum {
  2623. /* The string "GNU\0" as a magic number. */
  2624. GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16),
  2625. NOTE_DATA_SZ = 1 * KiB,
  2626. NOTE_NAME_SZ = 4,
  2627. ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8,
  2628. };
  2629. /*
  2630. * Process a single gnu_property entry.
  2631. * Return false for error.
  2632. */
  2633. static bool parse_elf_property(const uint32_t *data, int *off, int datasz,
  2634. struct image_info *info, bool have_prev_type,
  2635. uint32_t *prev_type, Error **errp)
  2636. {
  2637. uint32_t pr_type, pr_datasz, step;
  2638. if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) {
  2639. goto error_data;
  2640. }
  2641. datasz -= *off;
  2642. data += *off / sizeof(uint32_t);
  2643. if (datasz < 2 * sizeof(uint32_t)) {
  2644. goto error_data;
  2645. }
  2646. pr_type = data[0];
  2647. pr_datasz = data[1];
  2648. data += 2;
  2649. datasz -= 2 * sizeof(uint32_t);
  2650. step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN);
  2651. if (step > datasz) {
  2652. goto error_data;
  2653. }
  2654. /* Properties are supposed to be unique and sorted on pr_type. */
  2655. if (have_prev_type && pr_type <= *prev_type) {
  2656. if (pr_type == *prev_type) {
  2657. error_setg(errp, "Duplicate property in PT_GNU_PROPERTY");
  2658. } else {
  2659. error_setg(errp, "Unsorted property in PT_GNU_PROPERTY");
  2660. }
  2661. return false;
  2662. }
  2663. *prev_type = pr_type;
  2664. if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) {
  2665. return false;
  2666. }
  2667. *off += 2 * sizeof(uint32_t) + step;
  2668. return true;
  2669. error_data:
  2670. error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY");
  2671. return false;
  2672. }
  2673. /* Process NT_GNU_PROPERTY_TYPE_0. */
  2674. static bool parse_elf_properties(const ImageSource *src,
  2675. struct image_info *info,
  2676. const struct elf_phdr *phdr,
  2677. Error **errp)
  2678. {
  2679. union {
  2680. struct elf_note nhdr;
  2681. uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)];
  2682. } note;
  2683. int n, off, datasz;
  2684. bool have_prev_type;
  2685. uint32_t prev_type;
  2686. /* Unless the arch requires properties, ignore them. */
  2687. if (!ARCH_USE_GNU_PROPERTY) {
  2688. return true;
  2689. }
  2690. /* If the properties are crazy large, that's too bad. */
  2691. n = phdr->p_filesz;
  2692. if (n > sizeof(note)) {
  2693. error_setg(errp, "PT_GNU_PROPERTY too large");
  2694. return false;
  2695. }
  2696. if (n < sizeof(note.nhdr)) {
  2697. error_setg(errp, "PT_GNU_PROPERTY too small");
  2698. return false;
  2699. }
  2700. if (!imgsrc_read(&note, phdr->p_offset, n, src, errp)) {
  2701. return false;
  2702. }
    /*
     * The contents of a valid PT_GNU_PROPERTY are a sequence of uint32_t.
     * Swap most of them now, beyond the header and namesz.
     */
  2707. #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
  2708. for (int i = 4; i < n / 4; i++) {
  2709. bswap32s(note.data + i);
  2710. }
  2711. #endif
  2712. /*
  2713. * Note that nhdr is 3 words, and that the "name" described by namesz
  2714. * immediately follows nhdr and is thus at the 4th word. Further, all
  2715. * of the inputs to the kernel's round_up are multiples of 4.
  2716. */
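    /*
     * Viewed as the uint32_t array checked below, a valid note thus
     * starts (a sketch of the layout, not an additional constraint):
     *
     *   data[0]  n_namesz  == 4
     *   data[1]  n_descsz
     *   data[2]  n_type    == NT_GNU_PROPERTY_TYPE_0
     *   data[3]  name      == "GNU\0"  (GNU0_MAGIC)
     *   data[4]  first property entry, starting with pr_type
     */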
  2717. if (tswap32(note.nhdr.n_type) != NT_GNU_PROPERTY_TYPE_0 ||
  2718. tswap32(note.nhdr.n_namesz) != NOTE_NAME_SZ ||
  2719. note.data[3] != GNU0_MAGIC) {
  2720. error_setg(errp, "Invalid note in PT_GNU_PROPERTY");
  2721. return false;
  2722. }
  2723. off = sizeof(note.nhdr) + NOTE_NAME_SZ;
  2724. datasz = tswap32(note.nhdr.n_descsz) + off;
  2725. if (datasz > n) {
  2726. error_setg(errp, "Invalid note size in PT_GNU_PROPERTY");
  2727. return false;
  2728. }
  2729. have_prev_type = false;
  2730. prev_type = 0;
  2731. while (1) {
  2732. if (off == datasz) {
  2733. return true; /* end, exit ok */
  2734. }
  2735. if (!parse_elf_property(note.data, &off, datasz, info,
  2736. have_prev_type, &prev_type, errp)) {
  2737. return false;
  2738. }
  2739. have_prev_type = true;
  2740. }
  2741. }
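/*
 * For reference, a minimal note accepted by the parser above is the
 * AArch64 BTI marker (illustrative layout for ELFCLASS64, where
 * properties are 8-byte aligned):
 *
 *   n_namesz  = 4, n_descsz = 16, n_type = NT_GNU_PROPERTY_TYPE_0
 *   name      = "GNU\0"
 *   pr_type   = GNU_PROPERTY_AARCH64_FEATURE_1_AND
 *   pr_datasz = 4
 *   pr_data   = GNU_PROPERTY_AARCH64_FEATURE_1_BTI, plus 4 bytes padding
 */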
  2742. /**
  2743. * load_elf_image: Load an ELF image into the address space.
  2744. * @image_name: the filename of the image, to use in error messages.
  2745. * @src: the ImageSource from which to read.
  2746. * @info: info collected from the loaded image.
  2747. * @ehdr: the ELF header, not yet bswapped.
  2748. * @pinterp_name: record any PT_INTERP string found.
  2749. *
  2750. * On return: @info values will be filled in, as necessary or available.
  2751. */
  2752. static void load_elf_image(const char *image_name, const ImageSource *src,
  2753. struct image_info *info, struct elfhdr *ehdr,
  2754. char **pinterp_name)
  2755. {
  2756. g_autofree struct elf_phdr *phdr = NULL;
  2757. abi_ulong load_addr, load_bias, loaddr, hiaddr, error, align;
  2758. size_t reserve_size, align_size;
  2759. int i, prot_exec;
  2760. Error *err = NULL;
  2761. /*
  2762. * First of all, some simple consistency checks.
  2763. * Note that we rely on the bswapped ehdr staying in bprm_buf,
  2764. * for later use by load_elf_binary and create_elf_tables.
  2765. */
  2766. if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) {
  2767. goto exit_errmsg;
  2768. }
  2769. if (!elf_check_ident(ehdr)) {
  2770. error_setg(&err, "Invalid ELF image for this architecture");
  2771. goto exit_errmsg;
  2772. }
  2773. bswap_ehdr(ehdr);
  2774. if (!elf_check_ehdr(ehdr)) {
  2775. error_setg(&err, "Invalid ELF image for this architecture");
  2776. goto exit_errmsg;
  2777. }
  2778. phdr = imgsrc_read_alloc(ehdr->e_phoff,
  2779. ehdr->e_phnum * sizeof(struct elf_phdr),
  2780. src, &err);
  2781. if (phdr == NULL) {
  2782. goto exit_errmsg;
  2783. }
  2784. bswap_phdr(phdr, ehdr->e_phnum);
  2785. info->nsegs = 0;
  2786. info->pt_dynamic_addr = 0;
  2787. mmap_lock();
  2788. /*
  2789. * Find the maximum size of the image and allocate an appropriate
  2790. * amount of memory to handle that. Locate the interpreter, if any.
  2791. */
  2792. loaddr = -1, hiaddr = 0;
  2793. align = 0;
  2794. info->exec_stack = EXSTACK_DEFAULT;
  2795. for (i = 0; i < ehdr->e_phnum; ++i) {
  2796. struct elf_phdr *eppnt = phdr + i;
  2797. if (eppnt->p_type == PT_LOAD) {
  2798. abi_ulong a = eppnt->p_vaddr & TARGET_PAGE_MASK;
  2799. if (a < loaddr) {
  2800. loaddr = a;
  2801. }
  2802. a = eppnt->p_vaddr + eppnt->p_memsz - 1;
  2803. if (a > hiaddr) {
  2804. hiaddr = a;
  2805. }
  2806. ++info->nsegs;
  2807. align |= eppnt->p_align;
  2808. } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
  2809. g_autofree char *interp_name = NULL;
  2810. if (*pinterp_name) {
  2811. error_setg(&err, "Multiple PT_INTERP entries");
  2812. goto exit_errmsg;
  2813. }
  2814. interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz,
  2815. src, &err);
  2816. if (interp_name == NULL) {
  2817. goto exit_errmsg;
  2818. }
  2819. if (interp_name[eppnt->p_filesz - 1] != 0) {
  2820. error_setg(&err, "Invalid PT_INTERP entry");
  2821. goto exit_errmsg;
  2822. }
  2823. *pinterp_name = g_steal_pointer(&interp_name);
  2824. } else if (eppnt->p_type == PT_GNU_PROPERTY) {
  2825. if (!parse_elf_properties(src, info, eppnt, &err)) {
  2826. goto exit_errmsg;
  2827. }
  2828. } else if (eppnt->p_type == PT_GNU_STACK) {
  2829. info->exec_stack = eppnt->p_flags & PF_X;
  2830. }
  2831. }
  2832. load_addr = loaddr;
  2833. align = pow2ceil(align);
  2834. if (pinterp_name != NULL) {
  2835. if (ehdr->e_type == ET_EXEC) {
  2836. /*
  2837. * Make sure that the low address does not conflict with
  2838. * MMAP_MIN_ADDR or the QEMU application itself.
  2839. */
  2840. probe_guest_base(image_name, loaddr, hiaddr);
  2841. } else {
  2842. /*
  2843. * The binary is dynamic, but we still need to
  2844. * select guest_base. In this case we pass a size.
  2845. */
  2846. probe_guest_base(image_name, 0, hiaddr - loaddr);
  2847. /*
  2848. * Avoid collision with the loader by providing a different
  2849. * default load address.
  2850. */
  2851. load_addr += elf_et_dyn_base;
  2852. /*
  2853. * TODO: Better support for mmap alignment is desirable.
  2854. * Since we do not have complete control over the guest
  2855. * address space, we prefer the kernel to choose some address
  2856. * rather than force the use of LOAD_ADDR via MAP_FIXED.
  2857. */
  2858. if (align) {
  2859. load_addr &= -align;
  2860. }
  2861. }
  2862. }
  2863. /*
  2864. * Reserve address space for all of this.
  2865. *
  2866. * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get
  2867. * exactly the address range that is required. Without reserved_va,
  2868. * the guest address space is not isolated. We have attempted to avoid
  2869. * conflict with the host program itself via probe_guest_base, but using
  2870. * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check.
  2871. *
  2872. * Otherwise this is ET_DYN, and we are searching for a location
  2873. * that can hold the memory space required. If the image is
  2874. * pre-linked, LOAD_ADDR will be non-zero, and the kernel should
  2875. * honor that address if it happens to be free.
  2876. *
  2877. * In both cases, we will overwrite pages in this range with mappings
  2878. * from the executable.
  2879. */
  2880. reserve_size = (size_t)hiaddr - loaddr + 1;
  2881. align_size = reserve_size;
  2882. if (ehdr->e_type != ET_EXEC && align > qemu_real_host_page_size()) {
  2883. align_size += align - 1;
  2884. }
  2885. load_addr = target_mmap(load_addr, align_size, PROT_NONE,
  2886. MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
  2887. (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
  2888. -1, 0);
  2889. if (load_addr == -1) {
  2890. goto exit_mmap;
  2891. }
  2892. if (align_size != reserve_size) {
  2893. abi_ulong align_addr = ROUND_UP(load_addr, align);
  2894. abi_ulong align_end = TARGET_PAGE_ALIGN(align_addr + reserve_size);
  2895. abi_ulong load_end = TARGET_PAGE_ALIGN(load_addr + align_size);
  2896. if (align_addr != load_addr) {
  2897. target_munmap(load_addr, align_addr - load_addr);
  2898. }
  2899. if (align_end != load_end) {
  2900. target_munmap(align_end, load_end - align_end);
  2901. }
  2902. load_addr = align_addr;
  2903. }
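    /*
     * Worked example with illustrative numbers: for an ET_DYN image with
     * p_align == 2 MiB on a 4 KiB host, align_size is reserve_size plus
     * 2 MiB - 1. If target_mmap returned 0x7f1234001000, align_addr
     * rounds up to 0x7f1234200000; the slop before align_addr and after
     * align_end is unmapped, leaving an aligned, page-rounded reservation.
     */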
  2904. load_bias = load_addr - loaddr;
  2905. if (elf_is_fdpic(ehdr)) {
  2906. struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
  2907. g_malloc(sizeof(*loadsegs) * info->nsegs);
  2908. for (i = 0; i < ehdr->e_phnum; ++i) {
  2909. switch (phdr[i].p_type) {
  2910. case PT_DYNAMIC:
  2911. info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
  2912. break;
  2913. case PT_LOAD:
  2914. loadsegs->addr = phdr[i].p_vaddr + load_bias;
  2915. loadsegs->p_vaddr = phdr[i].p_vaddr;
  2916. loadsegs->p_memsz = phdr[i].p_memsz;
  2917. ++loadsegs;
  2918. break;
  2919. }
  2920. }
  2921. }
  2922. info->load_bias = load_bias;
  2923. info->code_offset = load_bias;
  2924. info->data_offset = load_bias;
  2925. info->load_addr = load_addr;
  2926. info->entry = ehdr->e_entry + load_bias;
  2927. info->start_code = -1;
  2928. info->end_code = 0;
  2929. info->start_data = -1;
  2930. info->end_data = 0;
  2931. /* Usual start for brk is after all sections of the main executable. */
  2932. info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias);
  2933. info->elf_flags = ehdr->e_flags;
  2934. prot_exec = PROT_EXEC;
  2935. #ifdef TARGET_AARCH64
  2936. /*
  2937. * If the BTI feature is present, this indicates that the executable
  2938. * pages of the startup binary should be mapped with PROT_BTI, so that
  2939. * branch targets are enforced.
  2940. *
  2941. * The startup binary is either the interpreter or the static executable.
  2942. * The interpreter is responsible for all pages of a dynamic executable.
  2943. *
     * ELF notes are backward compatible with older cpus.
     * Do not enable BTI unless it is supported.
  2946. */
  2947. if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
  2948. && (pinterp_name == NULL || *pinterp_name == 0)
  2949. && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) {
  2950. prot_exec |= TARGET_PROT_BTI;
  2951. }
  2952. #endif
  2953. for (i = 0; i < ehdr->e_phnum; i++) {
  2954. struct elf_phdr *eppnt = phdr + i;
  2955. if (eppnt->p_type == PT_LOAD) {
  2956. abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
  2957. int elf_prot = 0;
  2958. if (eppnt->p_flags & PF_R) {
  2959. elf_prot |= PROT_READ;
  2960. }
  2961. if (eppnt->p_flags & PF_W) {
  2962. elf_prot |= PROT_WRITE;
  2963. }
  2964. if (eppnt->p_flags & PF_X) {
  2965. elf_prot |= prot_exec;
  2966. }
  2967. vaddr = load_bias + eppnt->p_vaddr;
  2968. vaddr_po = vaddr & ~TARGET_PAGE_MASK;
  2969. vaddr_ps = vaddr & TARGET_PAGE_MASK;
  2970. vaddr_ef = vaddr + eppnt->p_filesz;
  2971. vaddr_em = vaddr + eppnt->p_memsz;
  2972. /*
  2973. * Some segments may be completely empty, with a non-zero p_memsz
  2974. * but no backing file segment.
  2975. */
  2976. if (eppnt->p_filesz != 0) {
  2977. error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
  2978. elf_prot, MAP_PRIVATE | MAP_FIXED,
  2979. src, eppnt->p_offset - vaddr_po);
  2980. if (error == -1) {
  2981. goto exit_mmap;
  2982. }
  2983. }
  2984. /* If the load segment requests extra zeros (e.g. bss), map it. */
  2985. if (vaddr_ef < vaddr_em &&
  2986. !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) {
  2987. goto exit_errmsg;
  2988. }
  2989. /* Find the full program boundaries. */
  2990. if (elf_prot & PROT_EXEC) {
  2991. if (vaddr < info->start_code) {
  2992. info->start_code = vaddr;
  2993. }
  2994. if (vaddr_ef > info->end_code) {
  2995. info->end_code = vaddr_ef;
  2996. }
  2997. }
  2998. if (elf_prot & PROT_WRITE) {
  2999. if (vaddr < info->start_data) {
  3000. info->start_data = vaddr;
  3001. }
  3002. if (vaddr_ef > info->end_data) {
  3003. info->end_data = vaddr_ef;
  3004. }
  3005. }
  3006. #ifdef TARGET_MIPS
  3007. } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
  3008. Mips_elf_abiflags_v0 abiflags;
  3009. if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags),
  3010. src, &err)) {
  3011. goto exit_errmsg;
  3012. }
  3013. bswap_mips_abiflags(&abiflags);
  3014. info->fp_abi = abiflags.fp_abi;
  3015. #endif
  3016. }
  3017. }
  3018. if (info->end_data == 0) {
  3019. info->start_data = info->end_code;
  3020. info->end_data = info->end_code;
  3021. }
  3022. if (qemu_log_enabled()) {
  3023. load_symbols(ehdr, src, load_bias);
  3024. }
  3025. debuginfo_report_elf(image_name, src->fd, load_bias);
  3026. mmap_unlock();
  3027. close(src->fd);
  3028. return;
  3029. exit_mmap:
  3030. error_setg_errno(&err, errno, "Error mapping file");
  3031. goto exit_errmsg;
  3032. exit_errmsg:
  3033. error_reportf_err(err, "%s: ", image_name);
  3034. exit(-1);
  3035. }
  3036. static void load_elf_interp(const char *filename, struct image_info *info,
  3037. char bprm_buf[BPRM_BUF_SIZE])
  3038. {
  3039. struct elfhdr ehdr;
  3040. ImageSource src;
  3041. int fd, retval;
  3042. Error *err = NULL;
  3043. fd = open(path(filename), O_RDONLY);
  3044. if (fd < 0) {
  3045. error_setg_file_open(&err, errno, filename);
  3046. error_report_err(err);
  3047. exit(-1);
  3048. }
  3049. retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
  3050. if (retval < 0) {
  3051. error_setg_errno(&err, errno, "Error reading file header");
  3052. error_reportf_err(err, "%s: ", filename);
  3053. exit(-1);
  3054. }
  3055. src.fd = fd;
  3056. src.cache = bprm_buf;
  3057. src.cache_size = retval;
  3058. load_elf_image(filename, &src, info, &ehdr, NULL);
  3059. }
  3060. #ifndef vdso_image_info
  3061. #ifdef VDSO_HEADER
  3062. #include VDSO_HEADER
  3063. #define vdso_image_info(flags) &vdso_image_info
  3064. #else
  3065. #define vdso_image_info(flags) NULL
  3066. #endif /* VDSO_HEADER */
  3067. #endif /* vdso_image_info */
  3068. static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso)
  3069. {
  3070. ImageSource src;
  3071. struct elfhdr ehdr;
  3072. abi_ulong load_bias, load_addr;
  3073. src.fd = -1;
  3074. src.cache = vdso->image;
  3075. src.cache_size = vdso->image_size;
  3076. load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL);
  3077. load_addr = info->load_addr;
  3078. load_bias = info->load_bias;
  3079. /*
  3080. * We need to relocate the VDSO image. The one built into the kernel
  3081. * is built for a fixed address. The one built for QEMU is not, since
  3082. * that requires close control of the guest address space.
  3083. * We pre-processed the image to locate all of the addresses that need
  3084. * to be updated.
  3085. */
  3086. for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) {
  3087. abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]);
  3088. *addr = tswapal(tswapal(*addr) + load_bias);
  3089. }
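    /*
     * For example (hypothetical values): a reloc entry of 0x80 names an
     * absolute address stored at image offset 0x80; if that slot held
     * 0x1000 in the unrelocated image, it holds 0x1000 + load_bias
     * after the loop above.
     */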
  3090. /* Install signal trampolines, if present. */
  3091. if (vdso->sigreturn_ofs) {
  3092. default_sigreturn = load_addr + vdso->sigreturn_ofs;
  3093. }
  3094. if (vdso->rt_sigreturn_ofs) {
  3095. default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs;
  3096. }
  3097. /* Remove write from VDSO segment. */
  3098. target_mprotect(info->start_data, info->end_data - info->start_data,
  3099. PROT_READ | PROT_EXEC);
  3100. }
  3101. static int symfind(const void *s0, const void *s1)
  3102. {
  3103. struct elf_sym *sym = (struct elf_sym *)s1;
  3104. __typeof(sym->st_value) addr = *(uint64_t *)s0;
  3105. int result = 0;
  3106. if (addr < sym->st_value) {
  3107. result = -1;
  3108. } else if (addr >= sym->st_value + sym->st_size) {
  3109. result = 1;
  3110. }
  3111. return result;
  3112. }
  3113. static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr)
  3114. {
  3115. #if ELF_CLASS == ELFCLASS32
  3116. struct elf_sym *syms = s->disas_symtab.elf32;
  3117. #else
  3118. struct elf_sym *syms = s->disas_symtab.elf64;
  3119. #endif
    /* binary search */
  3121. struct elf_sym *sym;
  3122. sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
  3123. if (sym != NULL) {
  3124. return s->disas_strtab + sym->st_name;
  3125. }
  3126. return "";
  3127. }
  3128. /* FIXME: This should use elf_ops.h.inc */
  3129. static int symcmp(const void *s0, const void *s1)
  3130. {
  3131. struct elf_sym *sym0 = (struct elf_sym *)s0;
  3132. struct elf_sym *sym1 = (struct elf_sym *)s1;
  3133. return (sym0->st_value < sym1->st_value)
  3134. ? -1
  3135. : ((sym0->st_value > sym1->st_value) ? 1 : 0);
  3136. }
  3137. /* Best attempt to load symbols from this ELF object. */
  3138. static void load_symbols(struct elfhdr *hdr, const ImageSource *src,
  3139. abi_ulong load_bias)
  3140. {
  3141. int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
  3142. g_autofree struct elf_shdr *shdr = NULL;
  3143. char *strings = NULL;
  3144. struct elf_sym *syms = NULL;
  3145. struct elf_sym *new_syms;
  3146. uint64_t segsz;
  3147. shnum = hdr->e_shnum;
  3148. shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr),
  3149. src, NULL);
  3150. if (shdr == NULL) {
  3151. return;
  3152. }
  3153. bswap_shdr(shdr, shnum);
  3154. for (i = 0; i < shnum; ++i) {
  3155. if (shdr[i].sh_type == SHT_SYMTAB) {
  3156. sym_idx = i;
  3157. str_idx = shdr[i].sh_link;
  3158. goto found;
  3159. }
  3160. }
  3161. /* There will be no symbol table if the file was stripped. */
  3162. return;
  3163. found:
    /* Now we know where the strtab and symtab are. Snarf them. */
  3165. segsz = shdr[str_idx].sh_size;
  3166. strings = g_try_malloc(segsz);
  3167. if (!strings) {
  3168. goto give_up;
  3169. }
  3170. if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) {
  3171. goto give_up;
  3172. }
  3173. segsz = shdr[sym_idx].sh_size;
  3174. if (segsz / sizeof(struct elf_sym) > INT_MAX) {
  3175. /*
  3176. * Implausibly large symbol table: give up rather than ploughing
  3177. * on with the number of symbols calculation overflowing.
  3178. */
  3179. goto give_up;
  3180. }
  3181. nsyms = segsz / sizeof(struct elf_sym);
  3182. syms = g_try_malloc(segsz);
  3183. if (!syms) {
  3184. goto give_up;
  3185. }
  3186. if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) {
  3187. goto give_up;
  3188. }
  3189. for (i = 0; i < nsyms; ) {
  3190. bswap_sym(syms + i);
  3191. /* Throw away entries which we do not need. */
  3192. if (syms[i].st_shndx == SHN_UNDEF
  3193. || syms[i].st_shndx >= SHN_LORESERVE
  3194. || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
  3195. if (i < --nsyms) {
  3196. syms[i] = syms[nsyms];
  3197. }
  3198. } else {
  3199. #if defined(TARGET_ARM) || defined (TARGET_MIPS)
  3200. /* The bottom address bit marks a Thumb or MIPS16 symbol. */
  3201. syms[i].st_value &= ~(target_ulong)1;
  3202. #endif
  3203. syms[i].st_value += load_bias;
  3204. i++;
  3205. }
  3206. }
  3207. /* No "useful" symbol. */
  3208. if (nsyms == 0) {
  3209. goto give_up;
  3210. }
  3211. /*
  3212. * Attempt to free the storage associated with the local symbols
  3213. * that we threw away. Whether or not this has any effect on the
  3214. * memory allocation depends on the malloc implementation and how
  3215. * many symbols we managed to discard.
  3216. */
  3217. new_syms = g_try_renew(struct elf_sym, syms, nsyms);
  3218. if (new_syms == NULL) {
  3219. goto give_up;
  3220. }
  3221. syms = new_syms;
  3222. qsort(syms, nsyms, sizeof(*syms), symcmp);
  3223. {
  3224. struct syminfo *s = g_new(struct syminfo, 1);
  3225. s->disas_strtab = strings;
  3226. s->disas_num_syms = nsyms;
  3227. #if ELF_CLASS == ELFCLASS32
  3228. s->disas_symtab.elf32 = syms;
  3229. #else
  3230. s->disas_symtab.elf64 = syms;
  3231. #endif
  3232. s->lookup_symbol = lookup_symbolxx;
  3233. s->next = syminfos;
  3234. syminfos = s;
  3235. }
  3236. return;
  3237. give_up:
  3238. g_free(strings);
  3239. g_free(syms);
  3240. }
  3241. uint32_t get_elf_eflags(int fd)
  3242. {
  3243. struct elfhdr ehdr;
  3244. off_t offset;
  3245. int ret;
  3246. /* Read ELF header */
  3247. offset = lseek(fd, 0, SEEK_SET);
  3248. if (offset == (off_t) -1) {
  3249. return 0;
  3250. }
    ret = read(fd, &ehdr, sizeof(ehdr));
    if (ret < 0 || (size_t)ret < sizeof(ehdr)) {
        /* Read error or short read: no usable ELF header. */
        return 0;
    }
  3255. offset = lseek(fd, offset, SEEK_SET);
  3256. if (offset == (off_t) -1) {
  3257. return 0;
  3258. }
  3259. /* Check ELF signature */
  3260. if (!elf_check_ident(&ehdr)) {
  3261. return 0;
  3262. }
  3263. /* check header */
  3264. bswap_ehdr(&ehdr);
  3265. if (!elf_check_ehdr(&ehdr)) {
  3266. return 0;
  3267. }
  3268. /* return architecture id */
  3269. return ehdr.e_flags;
  3270. }
  3271. int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
  3272. {
  3273. /*
  3274. * We need a copy of the elf header for passing to create_elf_tables.
  3275. * We will have overwritten the original when we re-use bprm->buf
  3276. * while loading the interpreter. Allocate the storage for this now
  3277. * and let elf_load_image do any swapping that may be required.
  3278. */
  3279. struct elfhdr ehdr;
  3280. struct image_info interp_info, vdso_info;
  3281. char *elf_interpreter = NULL;
  3282. char *scratch;
  3283. memset(&interp_info, 0, sizeof(interp_info));
  3284. #ifdef TARGET_MIPS
  3285. interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
  3286. #endif
  3287. load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter);
    /*
     * Do this so that we can load the interpreter, if need be.
     * We will change some of these later.
     */
  3290. bprm->p = setup_arg_pages(bprm, info);
  3291. scratch = g_new0(char, TARGET_PAGE_SIZE);
  3292. if (STACK_GROWS_DOWN) {
  3293. bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
  3294. bprm->p, info->stack_limit);
  3295. info->file_string = bprm->p;
  3296. bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
  3297. bprm->p, info->stack_limit);
  3298. info->env_strings = bprm->p;
  3299. bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
  3300. bprm->p, info->stack_limit);
  3301. info->arg_strings = bprm->p;
  3302. } else {
  3303. info->arg_strings = bprm->p;
  3304. bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
  3305. bprm->p, info->stack_limit);
  3306. info->env_strings = bprm->p;
  3307. bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
  3308. bprm->p, info->stack_limit);
  3309. info->file_string = bprm->p;
  3310. bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
  3311. bprm->p, info->stack_limit);
  3312. }
  3313. g_free(scratch);
  3314. if (!bprm->p) {
  3315. fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
  3316. exit(-1);
  3317. }
  3318. if (elf_interpreter) {
  3319. load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
        /*
         * While unusual because of ELF_ET_DYN_BASE, if we are unlucky
         * with the mappings the interpreter can be loaded above but
         * near the main executable, which can leave very little room
         * for the heap.
         * If the current brk has less than 16MB of headroom below the
         * interpreter, use the end of the interpreter instead.
         */
  3328. if (interp_info.brk > info->brk &&
  3329. interp_info.load_bias - info->brk < 16 * MiB) {
  3330. info->brk = interp_info.brk;
  3331. }
  3332. /* If the program interpreter is one of these two, then assume
  3333. an iBCS2 image. Otherwise assume a native linux image. */
  3334. if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
  3335. || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
  3336. info->personality = PER_SVR4;
  3337. /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
  3338. and some applications "depend" upon this behavior. Since
  3339. we do not have the power to recompile these, we emulate
  3340. the SVr4 behavior. Sigh. */
  3341. target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC,
  3342. MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS,
  3343. -1, 0);
  3344. }
  3345. #ifdef TARGET_MIPS
  3346. info->interp_fp_abi = interp_info.fp_abi;
  3347. #endif
  3348. }
  3349. /*
  3350. * Load a vdso if available, which will amongst other things contain the
  3351. * signal trampolines. Otherwise, allocate a separate page for them.
  3352. */
  3353. const VdsoImageInfo *vdso = vdso_image_info(info->elf_flags);
  3354. if (vdso) {
  3355. load_elf_vdso(&vdso_info, vdso);
  3356. info->vdso = vdso_info.load_bias;
  3357. } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
  3358. abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
  3359. PROT_READ | PROT_WRITE,
  3360. MAP_PRIVATE | MAP_ANON, -1, 0);
  3361. if (tramp_page == -1) {
  3362. return -errno;
  3363. }
  3364. setup_sigtramp(tramp_page);
  3365. target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
  3366. }
  3367. bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info,
  3368. elf_interpreter ? &interp_info : NULL,
  3369. vdso ? &vdso_info : NULL);
  3370. info->start_stack = bprm->p;
  3371. /* If we have an interpreter, set that as the program's entry point.
  3372. Copy the load_bias as well, to help PPC64 interpret the entry
  3373. point as a function descriptor. Do this after creating elf tables
  3374. so that we copy the original program entry point into the AUXV. */
  3375. if (elf_interpreter) {
  3376. info->load_bias = interp_info.load_bias;
  3377. info->entry = interp_info.entry;
  3378. g_free(elf_interpreter);
  3379. }
  3380. #ifdef USE_ELF_CORE_DUMP
  3381. bprm->core_dump = &elf_core_dump;
  3382. #endif
  3383. return 0;
  3384. }
  3385. #ifdef USE_ELF_CORE_DUMP
  3386. /*
  3387. * Definitions to generate Intel SVR4-like core files.
  3388. * These mostly have the same names as the SVR4 types with "target_elf_"
  3389. * tacked on the front to prevent clashes with linux definitions,
  3390. * and the typedef forms have been avoided. This is mostly like
  3391. * the SVR4 structure, but more Linuxy, with things that Linux does
  3392. * not support and which gdb doesn't really use excluded.
  3393. *
 * Fields we don't dump (their contents are zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump to a target is a (quite) simple process. First you
 * define USE_ELF_CORE_DUMP in the target ELF code (where init_thread() for
 * the target resides):
 *
 *   #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of the register set used for dumping. The ELF
 * specification says that it needs to be an array of elf_greg_t with
 * ELF_NREG elements.
 *
 *   typedef <target_regtype> target_elf_greg_t;
 *   #define ELF_NREG <number of registers>
 *   typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * The last step is to implement a target-specific function that copies
 * the registers from a given cpu into the register set just defined.
 * The prototype is:
 *
 *   static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                  const CPUArchState *env);
 *
 * Parameters:
 *   regs - copy register values into here (allocated and zeroed by caller)
 *   env  - copy registers from here
 *
 * An example for the ARM target is provided in this file.
 */
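/*
 * A minimal sketch for a hypothetical target with 32 general-purpose
 * registers plus a program counter (names illustrative, not a real port):
 *
 *   typedef abi_ulong target_elf_greg_t;
 *   #define ELF_NREG 33
 *   typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *   static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                  const CPUArchState *env)
 *   {
 *       for (int i = 0; i < 32; i++) {
 *           (*regs)[i] = tswapreg(env->regs[i]);
 *       }
 *       (*regs)[32] = tswapreg(env->pc);
 *   }
 */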
  3424. struct target_elf_siginfo {
  3425. abi_int si_signo; /* signal number */
  3426. abi_int si_code; /* extra code */
  3427. abi_int si_errno; /* errno */
  3428. };
  3429. struct target_elf_prstatus {
  3430. struct target_elf_siginfo pr_info; /* Info associated with signal */
  3431. abi_short pr_cursig; /* Current signal */
  3432. abi_ulong pr_sigpend; /* XXX */
  3433. abi_ulong pr_sighold; /* XXX */
  3434. target_pid_t pr_pid;
  3435. target_pid_t pr_ppid;
  3436. target_pid_t pr_pgrp;
  3437. target_pid_t pr_sid;
  3438. struct target_timeval pr_utime; /* XXX User time */
  3439. struct target_timeval pr_stime; /* XXX System time */
  3440. struct target_timeval pr_cutime; /* XXX Cumulative user time */
  3441. struct target_timeval pr_cstime; /* XXX Cumulative system time */
  3442. target_elf_gregset_t pr_reg; /* GP registers */
  3443. abi_int pr_fpvalid; /* XXX */
  3444. };
  3445. #define ELF_PRARGSZ (80) /* Number of chars for args */
  3446. struct target_elf_prpsinfo {
  3447. char pr_state; /* numeric process state */
  3448. char pr_sname; /* char for pr_state */
  3449. char pr_zomb; /* zombie */
  3450. char pr_nice; /* nice val */
  3451. abi_ulong pr_flag; /* flags */
  3452. target_uid_t pr_uid;
  3453. target_gid_t pr_gid;
  3454. target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
  3455. /* Lots missing */
  3456. char pr_fname[16] QEMU_NONSTRING; /* filename of executable */
  3457. char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
  3458. };
  3459. #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
  3460. static void bswap_prstatus(struct target_elf_prstatus *prstatus)
  3461. {
  3462. prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
  3463. prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
  3464. prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
  3465. prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
  3466. prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
  3467. prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
  3468. prstatus->pr_pid = tswap32(prstatus->pr_pid);
  3469. prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
  3470. prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
  3471. prstatus->pr_sid = tswap32(prstatus->pr_sid);
  3472. /* cpu times are not filled, so we skip them */
  3473. /* regs should be in correct format already */
  3474. prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
  3475. }
  3476. static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
  3477. {
  3478. psinfo->pr_flag = tswapal(psinfo->pr_flag);
  3479. psinfo->pr_uid = tswap16(psinfo->pr_uid);
  3480. psinfo->pr_gid = tswap16(psinfo->pr_gid);
  3481. psinfo->pr_pid = tswap32(psinfo->pr_pid);
  3482. psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
  3483. psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
  3484. psinfo->pr_sid = tswap32(psinfo->pr_sid);
  3485. }
  3486. static void bswap_note(struct elf_note *en)
  3487. {
  3488. bswap32s(&en->n_namesz);
  3489. bswap32s(&en->n_descsz);
  3490. bswap32s(&en->n_type);
  3491. }
  3492. #else
  3493. static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
  3494. static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
  3495. static inline void bswap_note(struct elf_note *en) { }
  3496. #endif /* HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN */
  3497. /*
  3498. * Calculate file (dump) size of given memory region.
  3499. */
  3500. static size_t vma_dump_size(target_ulong start, target_ulong end,
  3501. unsigned long flags)
  3502. {
  3503. /* The area must be readable. */
  3504. if (!(flags & PAGE_READ)) {
  3505. return 0;
  3506. }
    /*
     * Usually we don't dump executable pages, as they contain
     * non-writable code that the debugger can read directly from
     * the target library etc. If the region does not start with an
     * ELF header, we dump it anyway.
     */
  3512. if (!(flags & PAGE_WRITE_ORG) &&
  3513. (flags & PAGE_EXEC) &&
  3514. memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) {
  3515. return 0;
  3516. }
  3517. return end - start;
  3518. }
  3519. static size_t size_note(const char *name, size_t datasz)
  3520. {
  3521. size_t namesz = strlen(name) + 1;
  3522. namesz = ROUND_UP(namesz, 4);
  3523. datasz = ROUND_UP(datasz, 4);
  3524. return sizeof(struct elf_note) + namesz + datasz;
  3525. }
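/*
 * For example, a "CORE" note with a 4-byte descriptor occupies
 * 12 (note header) + 8 ("CORE\0" rounded up) + 4 = 24 bytes.
 */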
  3526. static void *fill_note(void **pptr, int type, const char *name, size_t datasz)
  3527. {
  3528. void *ptr = *pptr;
  3529. struct elf_note *n = ptr;
  3530. size_t namesz = strlen(name) + 1;
  3531. n->n_namesz = namesz;
  3532. n->n_descsz = datasz;
  3533. n->n_type = type;
  3534. bswap_note(n);
  3535. ptr += sizeof(*n);
  3536. memcpy(ptr, name, namesz);
  3537. namesz = ROUND_UP(namesz, 4);
  3538. datasz = ROUND_UP(datasz, 4);
  3539. *pptr = ptr + namesz + datasz;
  3540. return ptr + namesz;
  3541. }
  3542. static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
  3543. uint32_t flags)
  3544. {
  3545. memcpy(elf->e_ident, ELFMAG, SELFMAG);
  3546. elf->e_ident[EI_CLASS] = ELF_CLASS;
  3547. elf->e_ident[EI_DATA] = ELF_DATA;
  3548. elf->e_ident[EI_VERSION] = EV_CURRENT;
  3549. elf->e_ident[EI_OSABI] = ELF_OSABI;
  3550. elf->e_type = ET_CORE;
  3551. elf->e_machine = machine;
  3552. elf->e_version = EV_CURRENT;
  3553. elf->e_phoff = sizeof(struct elfhdr);
  3554. elf->e_flags = flags;
  3555. elf->e_ehsize = sizeof(struct elfhdr);
  3556. elf->e_phentsize = sizeof(struct elf_phdr);
  3557. elf->e_phnum = segs;
  3558. bswap_ehdr(elf);
  3559. }
  3560. static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
  3561. {
  3562. phdr->p_type = PT_NOTE;
  3563. phdr->p_offset = offset;
  3564. phdr->p_filesz = sz;
  3565. bswap_phdr(phdr, 1);
  3566. }
  3567. static void fill_prstatus_note(void *data, CPUState *cpu, int signr)
  3568. {
  3569. /*
  3570. * Because note memory is only aligned to 4, and target_elf_prstatus
  3571. * may well have higher alignment requirements, fill locally and
  3572. * memcpy to the destination afterward.
  3573. */
  3574. struct target_elf_prstatus prstatus = {
  3575. .pr_info.si_signo = signr,
  3576. .pr_cursig = signr,
  3577. .pr_pid = get_task_state(cpu)->ts_tid,
  3578. .pr_ppid = getppid(),
  3579. .pr_pgrp = getpgrp(),
  3580. .pr_sid = getsid(0),
  3581. };
  3582. elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu));
  3583. bswap_prstatus(&prstatus);
  3584. memcpy(data, &prstatus, sizeof(prstatus));
  3585. }
  3586. static void fill_prpsinfo_note(void *data, const TaskState *ts)
  3587. {
  3588. /*
  3589. * Because note memory is only aligned to 4, and target_elf_prpsinfo
  3590. * may well have higher alignment requirements, fill locally and
  3591. * memcpy to the destination afterward.
  3592. */
  3593. struct target_elf_prpsinfo psinfo = {
  3594. .pr_pid = getpid(),
  3595. .pr_ppid = getppid(),
  3596. .pr_pgrp = getpgrp(),
  3597. .pr_sid = getsid(0),
  3598. .pr_uid = getuid(),
  3599. .pr_gid = getgid(),
  3600. };
  3601. char *base_filename;
  3602. size_t len;
  3603. len = ts->info->env_strings - ts->info->arg_strings;
  3604. len = MIN(len, ELF_PRARGSZ);
  3605. memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len);
  3606. for (size_t i = 0; i < len; i++) {
  3607. if (psinfo.pr_psargs[i] == 0) {
  3608. psinfo.pr_psargs[i] = ' ';
  3609. }
  3610. }
  3611. base_filename = g_path_get_basename(ts->bprm->filename);
  3612. /*
  3613. * Using strncpy here is fine: at max-length,
  3614. * this field is not NUL-terminated.
  3615. */
  3616. strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname));
  3617. g_free(base_filename);
  3618. bswap_psinfo(&psinfo);
  3619. memcpy(data, &psinfo, sizeof(psinfo));
  3620. }
  3621. static void fill_auxv_note(void *data, const TaskState *ts)
  3622. {
  3623. memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len);
  3624. }
/*
 * Constructs the name of the coredump file. We use the following
 * naming convention:
 *   qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns the filename.
 */
  3632. static char *core_dump_filename(const TaskState *ts)
  3633. {
  3634. g_autoptr(GDateTime) now = g_date_time_new_now_local();
  3635. g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S");
  3636. g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename);
  3637. return g_strdup_printf("qemu_%s_%s_%d.core",
  3638. base_filename, nowstr, (int)getpid());
  3639. }
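/*
 * For example, dumping a guest binary named "ls" as pid 4242 on
 * 2024-01-01 09:30:00 (hypothetical values) yields
 * "qemu_ls_20240101-093000_4242.core".
 */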
  3640. static int dump_write(int fd, const void *ptr, size_t size)
  3641. {
  3642. const char *bufp = (const char *)ptr;
  3643. ssize_t bytes_written, bytes_left;
  3644. bytes_written = 0;
  3645. bytes_left = size;
    /*
     * In normal conditions a single write(2) should do, but in the
     * case of a socket etc. this mechanism is more portable.
     */
  3650. do {
  3651. bytes_written = write(fd, bufp, bytes_left);
  3652. if (bytes_written < 0) {
            if (errno == EINTR) {
                continue;
            }
            return -1;
        } else if (bytes_written == 0) { /* eof */
            return -1;
  3658. }
  3659. bufp += bytes_written;
  3660. bytes_left -= bytes_written;
  3661. } while (bytes_left > 0);
    return 0;
  3663. }
  3664. static int wmr_page_unprotect_regions(void *opaque, target_ulong start,
  3665. target_ulong end, unsigned long flags)
  3666. {
  3667. if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) {
  3668. size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size());
  3669. while (1) {
  3670. page_unprotect(start, 0);
  3671. if (end - start <= step) {
  3672. break;
  3673. }
  3674. start += step;
  3675. }
  3676. }
  3677. return 0;
  3678. }
  3679. typedef struct {
  3680. unsigned count;
  3681. size_t size;
  3682. } CountAndSizeRegions;
  3683. static int wmr_count_and_size_regions(void *opaque, target_ulong start,
  3684. target_ulong end, unsigned long flags)
  3685. {
  3686. CountAndSizeRegions *css = opaque;
  3687. css->count++;
  3688. css->size += vma_dump_size(start, end, flags);
  3689. return 0;
  3690. }
  3691. typedef struct {
  3692. struct elf_phdr *phdr;
  3693. off_t offset;
  3694. } FillRegionPhdr;
  3695. static int wmr_fill_region_phdr(void *opaque, target_ulong start,
  3696. target_ulong end, unsigned long flags)
  3697. {
  3698. FillRegionPhdr *d = opaque;
  3699. struct elf_phdr *phdr = d->phdr;
  3700. phdr->p_type = PT_LOAD;
  3701. phdr->p_vaddr = start;
  3702. phdr->p_paddr = 0;
  3703. phdr->p_filesz = vma_dump_size(start, end, flags);
  3704. phdr->p_offset = d->offset;
  3705. d->offset += phdr->p_filesz;
  3706. phdr->p_memsz = end - start;
  3707. phdr->p_flags = (flags & PAGE_READ ? PF_R : 0)
  3708. | (flags & PAGE_WRITE_ORG ? PF_W : 0)
  3709. | (flags & PAGE_EXEC ? PF_X : 0);
  3710. phdr->p_align = ELF_EXEC_PAGESIZE;
  3711. bswap_phdr(phdr, 1);
  3712. d->phdr = phdr + 1;
  3713. return 0;
  3714. }
  3715. static int wmr_write_region(void *opaque, target_ulong start,
  3716. target_ulong end, unsigned long flags)
  3717. {
  3718. int fd = *(int *)opaque;
  3719. size_t size = vma_dump_size(start, end, flags);
  3720. if (!size) {
  3721. return 0;
  3722. }
  3723. return dump_write(fd, g2h_untagged(start), size);
  3724. }
  3725. /*
  3726. * Write out ELF coredump.
  3727. *
  3728. * See documentation of ELF object file format in:
  3729. * http://www.caldera.com/developers/devspecs/gabi41.pdf
  3730. *
 * The coredump format in Linux is as follows:
  3732. *
  3733. * 0 +----------------------+ \
  3734. * | ELF header | ET_CORE |
  3735. * +----------------------+ |
  3736. * | ELF program headers | |--- headers
  3737. * | - NOTE section | |
  3738. * | - PT_LOAD sections | |
  3739. * +----------------------+ /
  3740. * | NOTEs: |
  3741. * | - NT_PRSTATUS |
  3742. * | - NT_PRSINFO |
  3743. * | - NT_AUXV |
  3744. * +----------------------+ <-- aligned to target page
  3745. * | Process memory dump |
  3746. * : :
  3747. * . .
  3748. * : :
  3749. * | |
  3750. * +----------------------+
  3751. *
  3752. * NT_PRSTATUS -> struct elf_prstatus (per thread)
  3753. * NT_PRSINFO -> struct elf_prpsinfo
  3754. * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
  3755. *
 * The format follows the System V format as closely as possible.
 * Current version limitations are as follows:
  3758. * - no floating point registers are dumped
  3759. *
  3760. * Function returns 0 in case of success, negative errno otherwise.
  3761. *
 * TODO: make this work also at runtime: it should be possible to
 * force a coredump from a running process and then continue
 * processing. For example qemu could set up a SIGUSR2 handler
 * (provided that the target process hasn't registered a handler for
 * that signal) that does the dump when the signal is received.
  3767. */
  3768. static int elf_core_dump(int signr, const CPUArchState *env)
  3769. {
  3770. const CPUState *cpu = env_cpu_const(env);
  3771. const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
  3772. struct rlimit dumpsize;
  3773. CountAndSizeRegions css;
  3774. off_t offset, note_offset, data_offset;
  3775. size_t note_size;
  3776. int cpus, ret;
  3777. int fd = -1;
  3778. CPUState *cpu_iter;
  3779. if (prctl(PR_GET_DUMPABLE) == 0) {
  3780. return 0;
  3781. }
  3782. if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
  3783. return 0;
  3784. }
  3785. cpu_list_lock();
  3786. mmap_lock();
  3787. /* By unprotecting, we merge vmas that might be split. */
  3788. walk_memory_regions(NULL, wmr_page_unprotect_regions);
  3789. /*
  3790. * Walk through target process memory mappings and
  3791. * set up structure containing this information.
  3792. */
  3793. memset(&css, 0, sizeof(css));
  3794. walk_memory_regions(&css, wmr_count_and_size_regions);
  3795. cpus = 0;
  3796. CPU_FOREACH(cpu_iter) {
  3797. cpus++;
  3798. }
  3799. offset = sizeof(struct elfhdr);
  3800. offset += (css.count + 1) * sizeof(struct elf_phdr);
  3801. note_offset = offset;
  3802. offset += size_note("CORE", ts->info->auxv_len);
  3803. offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
  3804. offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
  3805. note_size = offset - note_offset;
  3806. data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);
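    /*
     * Illustrative example (hypothetical numbers): with 10 dumpable
     * regions and one cpu on a 64-bit target, offset = 64 (ehdr) +
     * 11 * 56 (phdrs) = 680, plus the three CORE notes; data_offset
     * then rounds the total up to ELF_EXEC_PAGESIZE, where the
     * process memory dump begins.
     */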
  3807. /* Do not dump if the corefile size exceeds the limit. */
  3808. if (dumpsize.rlim_cur != RLIM_INFINITY
  3809. && dumpsize.rlim_cur < data_offset + css.size) {
  3810. errno = 0;
  3811. goto out;
  3812. }
  3813. {
  3814. g_autofree char *corefile = core_dump_filename(ts);
  3815. fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
  3816. S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
  3817. }
  3818. if (fd < 0) {
  3819. goto out;
  3820. }
    /*
     * There is a fair amount of alignment padding within the notes
     * as well as preceding the process memory. Allocate a zeroed
     * block to hold it all. Write all of the headers directly into
     * this buffer and then write it out as a block.
     */
  3827. {
  3828. g_autofree void *header = g_malloc0(data_offset);
  3829. FillRegionPhdr frp;
  3830. void *hptr, *dptr;
  3831. /* Create elf file header. */
  3832. hptr = header;
  3833. fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
  3834. hptr += sizeof(struct elfhdr);
  3835. /* Create elf program headers. */
  3836. fill_elf_note_phdr(hptr, note_size, note_offset);
  3837. hptr += sizeof(struct elf_phdr);
  3838. frp.phdr = hptr;
  3839. frp.offset = data_offset;
  3840. walk_memory_regions(&frp, wmr_fill_region_phdr);
  3841. hptr = frp.phdr;
  3842. /* Create the notes. */
  3843. dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
  3844. fill_auxv_note(dptr, ts);
  3845. dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
  3846. sizeof(struct target_elf_prpsinfo));
  3847. fill_prpsinfo_note(dptr, ts);
  3848. CPU_FOREACH(cpu_iter) {
  3849. dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
  3850. sizeof(struct target_elf_prstatus));
  3851. fill_prstatus_note(dptr, cpu_iter, cpu_iter == cpu ? signr : 0);
  3852. }
  3853. if (dump_write(fd, header, data_offset) < 0) {
  3854. goto out;
  3855. }
  3856. }
  3857. /*
  3858. * Finally write process memory into the corefile as well.
  3859. */
  3860. if (walk_memory_regions(&fd, wmr_write_region) < 0) {
  3861. goto out;
  3862. }
  3863. errno = 0;
  3864. out:
  3865. ret = -errno;
  3866. mmap_unlock();
  3867. cpu_list_unlock();
  3868. if (fd >= 0) {
  3869. close(fd);
  3870. }
  3871. return ret;
  3872. }
  3873. #endif /* USE_ELF_CORE_DUMP */
  3874. void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
  3875. {
  3876. init_thread(regs, infop);
  3877. }