exec.c

/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;
/* Current instruction counter. While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables. */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table. Avoid silly small sizes. */
#if P_L1_BITS_REM < 4
#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc. */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
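/* Same multi-level lookup, but for the physical address map
   (l1_phys_map). Freshly allocated leaf entries start out as
   IO_MEM_UNASSIGNED. */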
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1. Always allocated. */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches. */
        /* We have a +- 4GB range on the branches; leave some slop. */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
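
/* TB pointers stored in a page's TB list carry the page slot (0 or 1)
   of the TB in their two low bits, hence the '& 3' / '& ~3' masking in
   the list-walking code below. */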
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
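
/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'
   (used to build the per-page code_bitmap). */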
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
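
/* Translate a block starting at 'pc' and link it into the physical
   page tables. If the TB cache is full, flush it and retry. */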
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
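
/* Usage sketch (illustrative, with hypothetical addresses): a TB whose
 * guest code straddles a page boundary is registered on both pages, e.g.
 * for a translation starting 8 bytes before a 4 KiB page boundary:
 *
 *     tb_page_addr_t phys_pc = 0x1ff8;        // first guest page
 *     tb_page_addr_t phys_page2 = 0x2000;     // spill-over page
 *     tb_link_page(tb, phys_pc, phys_page2);
 *
 * so invalidating either physical page will find and retire the TB.
 */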
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
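
/* Usage sketch (illustrative): the binary search relies on TBs being
 * allocated in ascending tc_ptr order within tbs[], so a host PC inside
 * generated code can be mapped back to its TB, e.g. from a hypothetical
 * fault handler where host_fault_pc is assumed to be available:
 *
 *     TranslationBlock *tb = tb_find_pc(host_fault_pc);
 *     if (tb) {
 *         cpu_restore_state(tb, env, host_fault_pc);  // resync guest state
 *     }
 */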
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
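
/* Usage sketch (illustrative): a caller such as a gdb stub would request
 * a 4-byte write watchpoint like this (BP_GDB marks it as GDB-injected;
 * `env` and `addr` are assumed to be in scope):
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE,
 *                               &wp) < 0) {
 *         // rejected: unsupported length or unaligned address
 *     }
 */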
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
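
/* Usage sketch (illustrative): insert and later remove a GDB breakpoint
 * at a guest address `pc` (assumed), keeping the returned reference so
 * removal does not need a list lookup:
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(env, bp);
 *     }
 */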
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
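
/* Usage sketch (illustrative): under TCG the flag only takes effect for
 * newly translated code, which is why tb_flush() is called above; a
 * debugger front end simply toggles it per CPU:
 *
 *     cpu_single_step(env, 1);   // EXCP_DEBUG after each guest insn
 *     ...
 *     cpu_single_step(env, 0);   // back to normal block execution
 */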
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the CPU will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the CPU is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
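
/* Usage sketch (illustrative): each name in the comma-separated string is
 * resolved against cpu_log_items[] and the masks are ORed together; e.g.
 * a caller handling a "-d in_asm,cpu" style option might do:
 *
 *     int mask = cpu_str_to_log_mask("in_asm,cpu");
 *     if (mask) {
 *         cpu_set_log(mask);   // CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU
 *     }
 */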
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone.
       The list heads copied by memcpy() above still point at the source
       CPU's entries, so re-initialize them on the new CPU before cloning
       the entries themselves. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;

    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
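
/* Worked example (illustrative, with hypothetical addresses): with a 2 MB
 * page already recorded at 0x00200000 (mask 0xffe00000), adding another
 * 2 MB page at 0x00800000 widens the mask until both addresses agree:
 *
 *     0x00200000 ^ 0x00800000 = 0x00a00000
 *     mask: 0xffe00000 -> 0xff000000     (three shifts)
 *
 * leaving tlb_flush_addr = 0x00000000 and tlb_flush_mask = 0xff000000,
 * i.e. one 16 MB region covering both large pages.
 */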
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init(target_phys_addr_t base, ram_addr_t *phys,
                               ram_addr_t orig_memory,
                               ram_addr_t region_offset);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                       \
    do {                                                                  \
        if (addr > start_addr)                                            \
            start_addr2 = 0;                                              \
        else {                                                            \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;                 \
            if (start_addr2 > 0)                                          \
                need_subpage = 1;                                         \
        }                                                                 \
                                                                          \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)          \
            end_addr2 = TARGET_PAGE_SIZE - 1;                             \
        else {                                                            \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                         \
                need_subpage = 1;                                         \
        }                                                                 \
    } while (0)
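
/* Worked example (illustrative, assuming 4 KiB target pages): registering
 * a region at start_addr = 0x1080 with orig_size = 0x100 against the page
 * at addr = 0x1000 yields
 *
 *     start_addr2 = 0x080, end_addr2 = 0x17f, need_subpage = 1
 *
 * i.e. only bytes 0x080..0x17f of that page are covered, so a subpage
 * container is needed instead of a whole-page mapping.
 */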
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
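
/* Usage sketch (illustrative): on a hugetlbfs mount, statfs() reports the
 * huge page size in f_bsize, so assuming a typical x86-64 host mount at
 * /dev/hugepages:
 *
 *     long sz = gethugepagesize("/dev/hugepages");   // commonly 2 MiB
 *
 * A path outside hugetlbfs only triggers the warning above; its regular
 * filesystem block size is still returned.
 */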
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
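
/* Worked example (illustrative, with hypothetical offsets): this is a
 * best-fit scan over the gaps between existing blocks.  With blocks at
 * [0, 0x8000000) and [0xa000000, 0xb000000), a request for 0x1000000
 * bytes fits the 0x2000000-byte gap starting at 0x8000000, so 0x8000000
 * is returned in preference to the end of RAM at 0xb000000.
 */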
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
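
/* Usage sketch (illustrative): a device model typically allocates its
 * backing RAM and then resolves a host pointer to it, e.g. a hypothetical
 * VGA model with 16 MB of video memory:
 *
 *     ram_addr_t vram = qemu_ram_alloc(dev, "vga.vram", 16 * 1024 * 1024);
 *     uint8_t *ptr = qemu_get_ram_ptr(vram);  // host pointer to the block
 *
 * Note the returned value is an offset into the global ram_addr_t space,
 * not a host address.
 */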
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            qemu_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
  2729. /* Return a host pointer to ram allocated with qemu_ram_alloc.
  2730. With the exception of the softmmu code in this file, this should
  2731. only be used for local memory (e.g. video ram) that the device owns,
  2732. and knows it isn't going to access beyond the end of the block.
  2733. It should not be used for general purpose DMA.
  2734. Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
  2735. */
  2736. void *qemu_get_ram_ptr(ram_addr_t addr)
  2737. {
  2738. RAMBlock *block;
  2739. QLIST_FOREACH(block, &ram_list.blocks, next) {
  2740. if (addr - block->offset < block->length) {
  2741. /* Move this entry to to start of the list. */
  2742. if (block != QLIST_FIRST(&ram_list.blocks)) {
  2743. QLIST_REMOVE(block, next);
  2744. QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
  2745. }
  2746. if (xen_enabled()) {
  2747. /* We need to check if the requested address is in the RAM
  2748. * because we don't want to map the entire memory in QEMU.
  2749. * In that case just map until the end of the page.
  2750. */
  2751. if (block->offset == 0) {
  2752. return xen_map_cache(addr, 0, 0);
  2753. } else if (block->host == NULL) {
  2754. block->host =
  2755. xen_map_cache(block->offset, block->length, 1);
  2756. }
  2757. }
  2758. return block->host + (addr - block->offset);
  2759. }
  2760. }
  2761. fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
  2762. abort();
  2763. return NULL;
  2764. }
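
/*
 * Illustrative sketch (not from the original file): typical use of
 * qemu_get_ram_ptr() by a device model that owns its RAM block.  The
 * names "vram_offset" and "vram" are hypothetical.
 *
 *     ram_addr_t vram_offset = qemu_ram_alloc(NULL, "example.vram",
 *                                             64 * 1024);
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     vram[0] = 0xff;    // valid only within this block's length
 *
 * Guest-driven DMA should go through cpu_physical_memory_map() or
 * cpu_physical_memory_rw() instead, as the comment above says.
 */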
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr, but it avoids reordering the ram blocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
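
/*
 * Illustrative sketch: *size is an in/out parameter, so a caller that
 * crosses the end of a RAM block gets the mapping clamped rather than
 * an out-of-range pointer.  "some_ram_addr" is hypothetical.
 *
 *     ram_addr_t len = 4096;
 *     void *p = qemu_ram_ptr_length(some_ram_addr, &len);
 *     // on return, len <= 4096; it shrinks if some_ram_addr sits
 *     // closer than 4096 bytes to the end of its block
 */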
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }

    return ram_addr;
}
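
/*
 * Illustrative sketch: within a mapped block the two translations are
 * inverses of each other.
 *
 *     uint8_t *host = qemu_get_ram_ptr(offset);
 *     assert(qemu_ram_addr_from_host_nofail(host) == offset);
 *
 * qemu_ram_addr_from_host() is the non-aborting variant; it returns -1
 * if the pointer does not fall inside any mapped RAM block.
 */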
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
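
/*
 * Illustrative sketch: these handlers are reached indirectly.  A
 * debugger front end arms a watchpoint with cpu_watchpoint_insert()
 * (defined earlier in this file), and the TLB tricks mentioned above
 * then route accesses to the watched page through watch_mem_read and
 * watch_mem_write.  "addr" here is a hypothetical guest address.
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB,
 *                               &wp) == 0) {
 *         // a 4-byte write watchpoint is now live at addr
 *     }
 */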
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */
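
/*
 * Illustrative sketch: a device declares its own endianness at
 * registration time and the table above decides whether a swap is
 * interposed.  The handler arrays and opaque "s" are hypothetical.
 *
 *     int io = cpu_register_io_memory(my_be_read, my_be_write, s,
 *                                     DEVICE_BIG_ENDIAN);
 *     // on a little-endian target this wraps the handlers in the
 *     // SwapEndianContainer below; on a big-endian target it does not
 */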
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2).  Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory(); (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
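
/*
 * Illustrative sketch: registering MMIO handlers and mapping them at a
 * guest physical address.  The my_read/my_write handlers, "s" and
 * MY_MMIO_BASE are hypothetical; index 0/1/2 means byte/word/dword as
 * documented above.
 *
 *     static CPUReadMemoryFunc * const my_read[3] = {
 *         my_readb, my_readw, my_readl,
 *     };
 *     static CPUWriteMemoryFunc * const my_write[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(my_read, my_write, s,
 *                                     DEVICE_NATIVE_ENDIAN);
 *     cpu_register_physical_memory(MY_MMIO_BASE, TARGET_PAGE_SIZE, io);
 */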
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
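
/*
 * Illustrative sketch: most callers use the read/write wrappers around
 * cpu_physical_memory_rw() and never see the RAM/MMIO split above.
 * "gpa" is a hypothetical guest physical address.
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_read(gpa, buf, sizeof(buf));    // is_write == 0
 *     cpu_physical_memory_write(gpa, buf, sizeof(buf));   // is_write == 1
 */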
/* used for ROM loading; can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
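
/*
 * Illustrative sketch: firmware loaders use this instead of the normal
 * write path so the copy also lands in ROM pages, which
 * cpu_physical_memory_rw() deliberately skips.  "rom_base", "blob" and
 * "blob_size" are hypothetical.
 *
 *     cpu_physical_memory_write_rom(rom_base, blob, blob_size);
 */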
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = ULONG_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
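
/*
 * Illustrative sketch: the map/unmap pairing for a device reading from
 * guest memory.  When the range is MMIO, the single bounce buffer may
 * already be busy, so the map can come back NULL (or shorter than
 * requested); cpu_register_map_client() provides the retry hook.
 * "gpa", "size", "s" and "my_retry_cb" are hypothetical.
 *
 *     target_phys_addr_t len = size;
 *     void *p = cpu_physical_memory_map(gpa, &len, 0);    // 0 = read
 *     if (p) {
 *         // ... consume up to len bytes at p ...
 *         cpu_physical_memory_unmap(p, len, 0, len);
 *     } else {
 *         cpu_register_map_client(s, my_retry_cb);
 *     }
 */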
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
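
/*
 * Illustrative sketch: target MMU code uses the _notdirty store when
 * updating page-table entries in guest RAM (e.g. accessed/dirty bits),
 * precisely so the update does not feed back into dirty tracking.
 * "pte_addr" and "pte" are hypothetical; PG_DIRTY_MASK is borrowed
 * from the x86 target for illustration.
 *
 *     stl_phys_notdirty(pte_addr, pte | PG_DIRTY_MASK);
 */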
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
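
/*
 * Illustrative sketch: the gdbstub reads guest *virtual* memory through
 * this helper; cpu_get_phys_page_debug() walks the guest page tables
 * without disturbing the TLB.  "vaddr" is a hypothetical guest virtual
 * address.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
 *         // page not mapped by the guest MMU
 *     }
 */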
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif