translate.c
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
TCGv_i64 cpu_exclusive_test;
TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}

static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}
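
/* Load a 32-bit value from the CPU state at the given offset into a
   new temporary. */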
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
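
/* Store a 32-bit value to the CPU state at the given offset; the
   source temporary is freed. */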
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
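
/* Write var to the CPSR, updating only the bits selected by mask. */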
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
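
/* Raise an internal (QEMU-only) exception; excp must be an internal
   exception number. */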
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
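
/* Raise an architectural exception with the given syndrome value,
   targeting exception level target_el. */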
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
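
/* Dual 16x16->32 signed multiply: on return a holds the product of the
   low halfwords and b holds the product of the high halfwords. */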
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
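
/* Signed 32x32->64 multiply. Marks inputs as dead. */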
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
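
/* For ADC the incoming carry has to be folded into the flag computation
 * as well: with a 32-bit add2 op the sum below is built in two
 * carry-propagating steps (t0 + CF, then + t1); otherwise it is done as
 * a 64-bit add and split back into NF (low half) and CF (high half).
 */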
/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
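
/* Variable shifts: only the bottom byte of t1 is used as the shift count,
 * and counts greater than 31 must yield 0. Since TCG shifts only honour
 * the low 5 bits of the count, the movcond below substitutes a zero
 * operand for those cases.
 */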
#define GEN_SHIFT(name) \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{ \
    TCGv_i32 tmp1, tmp2, tmp3; \
    tmp1 = tcg_temp_new_i32(); \
    tcg_gen_andi_i32(tmp1, t1, 0xff); \
    tmp2 = tcg_const_i32(0); \
    tmp3 = tcg_const_i32(0x1f); \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3); \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
    tcg_gen_##name##_i32(dest, tmp2, tmp1); \
    tcg_temp_free_i32(tmp2); \
    tcg_temp_free_i32(tmp1); \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
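
/* Arithmetic shift right by register: counts above 31 are clamped to 31,
   so the result is the sign bit replicated across the word. */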
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
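
/* dest = abs(src), computed branchlessly with a movcond. */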
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
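
/* Copy bit <shift> of var into CF, i.e. the last bit shifted out by an
   immediate shift. */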
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}
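
/* ARMv6 parallel add/subtract. op1 selects the variant (1 signed,
 * 2 signed saturating, 3 signed halving, 5 unsigned, 6 unsigned
 * saturating, 7 unsigned halving) and op2 the operation. Only the plain
 * signed/unsigned forms update the GE flags, so only those helpers are
 * passed a pointer to them in CPUARMState.
 */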
#define PAS_OP(pfx) \
    switch (op2) { \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) { \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions. */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0. */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
         * the sign bit then AND with ZF to yield the result. */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value. */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
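
/* Nonzero for the data-processing ops that are logical rather than
   arithmetic, i.e. those whose flag update is done via gen_logic_CC
   (N and Z only, plus the shifter carry-out). */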
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_st_i32(val, addr, index, OPC); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
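
/* Sync the Thumb IT-block state from the DisasContext back into
   env->condexec_bits, so that an exception raised mid IT block sees the
   correct value (used by the gen_exception_*_insn helpers below). */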
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
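
/* Return a pointer to the float_status to use: the Neon "standard FP"
   status when neon is nonzero, otherwise the normal VFP fp_status. */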
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}
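
/* Byte offset of VFP register 'reg' within CPUARMState: D registers map
   directly onto vfp.regs[], single-precision registers onto the low or
   high half of the containing double. */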
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
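
/* Record register updates in the wCon control register: set_mup() sets
   bit 1 and set_cup() sets bit 0 (presumably the MUP/CUP "updated" flags
   the function names refer to). */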
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
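
/* Compute the transfer address for an iwMMXt load/store into 'dest',
   handling the pre-indexed, post-indexed and writeback forms. Returns
   nonzero if the addressing mode is not handled. */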
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
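
/* Fetch the shift amount for a W-register shift: either from one of the
   wCGR0-3 control registers (bit 8 set) or from the low 32 bits of wRd,
   masked with 'mask'. Returns nonzero for an invalid control register. */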
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;
    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
  1415. /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
  1416. (ie. an undefined instruction). */
  1417. static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
  1418. {
  1419. int rd, wrd;
  1420. int rdhi, rdlo, rd0, rd1, i;
  1421. TCGv_i32 addr;
  1422. TCGv_i32 tmp, tmp2, tmp3;
  1423. if ((insn & 0x0e000e00) == 0x0c000000) {
  1424. if ((insn & 0x0fe00ff0) == 0x0c400000) {
  1425. wrd = insn & 0xf;
  1426. rdlo = (insn >> 12) & 0xf;
  1427. rdhi = (insn >> 16) & 0xf;
  1428. if (insn & ARM_CP_RW_BIT) { /* TMRRC */
  1429. iwmmxt_load_reg(cpu_V0, wrd);
  1430. tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
  1431. tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
  1432. tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
  1433. } else { /* TMCRR */
  1434. tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
  1435. iwmmxt_store_reg(cpu_V0, wrd);
  1436. gen_op_iwmmxt_set_mup();
  1437. }
  1438. return 0;
  1439. }
  1440. wrd = (insn >> 12) & 0xf;
  1441. addr = tcg_temp_new_i32();
  1442. if (gen_iwmmxt_address(s, insn, addr)) {
  1443. tcg_temp_free_i32(addr);
  1444. return 1;
  1445. }
  1446. if (insn & ARM_CP_RW_BIT) {
  1447. if ((insn >> 28) == 0xf) { /* WLDRW wCx */
  1448. tmp = tcg_temp_new_i32();
  1449. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  1450. iwmmxt_store_creg(wrd, tmp);
  1451. } else {
  1452. i = 1;
  1453. if (insn & (1 << 8)) {
  1454. if (insn & (1 << 22)) { /* WLDRD */
  1455. gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
  1456. i = 0;
  1457. } else { /* WLDRW wRd */
  1458. tmp = tcg_temp_new_i32();
  1459. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  1460. }
  1461. } else {
  1462. tmp = tcg_temp_new_i32();
  1463. if (insn & (1 << 22)) { /* WLDRH */
  1464. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  1465. } else { /* WLDRB */
  1466. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  1467. }
  1468. }
  1469. if (i) {
  1470. tcg_gen_extu_i32_i64(cpu_M0, tmp);
  1471. tcg_temp_free_i32(tmp);
  1472. }
  1473. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1474. }
  1475. } else {
  1476. if ((insn >> 28) == 0xf) { /* WSTRW wCx */
  1477. tmp = iwmmxt_load_creg(wrd);
  1478. gen_aa32_st32(tmp, addr, get_mem_index(s));
  1479. } else {
  1480. gen_op_iwmmxt_movq_M0_wRn(wrd);
  1481. tmp = tcg_temp_new_i32();
  1482. if (insn & (1 << 8)) {
  1483. if (insn & (1 << 22)) { /* WSTRD */
  1484. gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
  1485. } else { /* WSTRW wRd */
  1486. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1487. gen_aa32_st32(tmp, addr, get_mem_index(s));
  1488. }
  1489. } else {
  1490. if (insn & (1 << 22)) { /* WSTRH */
  1491. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1492. gen_aa32_st16(tmp, addr, get_mem_index(s));
  1493. } else { /* WSTRB */
  1494. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1495. gen_aa32_st8(tmp, addr, get_mem_index(s));
  1496. }
  1497. }
  1498. }
  1499. tcg_temp_free_i32(tmp);
  1500. }
  1501. tcg_temp_free_i32(addr);
  1502. return 0;
  1503. }
  1504. if ((insn & 0x0f000000) != 0x0e000000)
  1505. return 1;
  1506. switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
  1507. case 0x000: /* WOR */
  1508. wrd = (insn >> 12) & 0xf;
  1509. rd0 = (insn >> 0) & 0xf;
  1510. rd1 = (insn >> 16) & 0xf;
  1511. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1512. gen_op_iwmmxt_orq_M0_wRn(rd1);
  1513. gen_op_iwmmxt_setpsr_nz();
  1514. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1515. gen_op_iwmmxt_set_mup();
  1516. gen_op_iwmmxt_set_cup();
  1517. break;
  1518. case 0x011: /* TMCR */
  1519. if (insn & 0xf)
  1520. return 1;
  1521. rd = (insn >> 12) & 0xf;
  1522. wrd = (insn >> 16) & 0xf;
  1523. switch (wrd) {
  1524. case ARM_IWMMXT_wCID:
  1525. case ARM_IWMMXT_wCASF:
  1526. break;
  1527. case ARM_IWMMXT_wCon:
  1528. gen_op_iwmmxt_set_cup();
  1529. /* Fall through. */
  1530. case ARM_IWMMXT_wCSSF:
  1531. tmp = iwmmxt_load_creg(wrd);
  1532. tmp2 = load_reg(s, rd);
  1533. tcg_gen_andc_i32(tmp, tmp, tmp2);
  1534. tcg_temp_free_i32(tmp2);
  1535. iwmmxt_store_creg(wrd, tmp);
  1536. break;
  1537. case ARM_IWMMXT_wCGR0:
  1538. case ARM_IWMMXT_wCGR1:
  1539. case ARM_IWMMXT_wCGR2:
  1540. case ARM_IWMMXT_wCGR3:
  1541. gen_op_iwmmxt_set_cup();
  1542. tmp = load_reg(s, rd);
  1543. iwmmxt_store_creg(wrd, tmp);
  1544. break;
  1545. default:
  1546. return 1;
  1547. }
  1548. break;
  1549. case 0x100: /* WXOR */
  1550. wrd = (insn >> 12) & 0xf;
  1551. rd0 = (insn >> 0) & 0xf;
  1552. rd1 = (insn >> 16) & 0xf;
  1553. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1554. gen_op_iwmmxt_xorq_M0_wRn(rd1);
  1555. gen_op_iwmmxt_setpsr_nz();
  1556. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1557. gen_op_iwmmxt_set_mup();
  1558. gen_op_iwmmxt_set_cup();
  1559. break;
  1560. case 0x111: /* TMRC */
  1561. if (insn & 0xf)
  1562. return 1;
  1563. rd = (insn >> 12) & 0xf;
  1564. wrd = (insn >> 16) & 0xf;
  1565. tmp = iwmmxt_load_creg(wrd);
  1566. store_reg(s, rd, tmp);
  1567. break;
  1568. case 0x300: /* WANDN */
  1569. wrd = (insn >> 12) & 0xf;
  1570. rd0 = (insn >> 0) & 0xf;
  1571. rd1 = (insn >> 16) & 0xf;
  1572. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1573. tcg_gen_neg_i64(cpu_M0, cpu_M0);
  1574. gen_op_iwmmxt_andq_M0_wRn(rd1);
  1575. gen_op_iwmmxt_setpsr_nz();
  1576. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1577. gen_op_iwmmxt_set_mup();
  1578. gen_op_iwmmxt_set_cup();
  1579. break;
  1580. case 0x200: /* WAND */
  1581. wrd = (insn >> 12) & 0xf;
  1582. rd0 = (insn >> 0) & 0xf;
  1583. rd1 = (insn >> 16) & 0xf;
  1584. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1585. gen_op_iwmmxt_andq_M0_wRn(rd1);
  1586. gen_op_iwmmxt_setpsr_nz();
  1587. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1588. gen_op_iwmmxt_set_mup();
  1589. gen_op_iwmmxt_set_cup();
  1590. break;
  1591. case 0x810: case 0xa10: /* WMADD */
  1592. wrd = (insn >> 12) & 0xf;
  1593. rd0 = (insn >> 0) & 0xf;
  1594. rd1 = (insn >> 16) & 0xf;
  1595. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1596. if (insn & (1 << 21))
  1597. gen_op_iwmmxt_maddsq_M0_wRn(rd1);
  1598. else
  1599. gen_op_iwmmxt_madduq_M0_wRn(rd1);
  1600. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1601. gen_op_iwmmxt_set_mup();
  1602. break;
  1603. case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
  1604. wrd = (insn >> 12) & 0xf;
  1605. rd0 = (insn >> 16) & 0xf;
  1606. rd1 = (insn >> 0) & 0xf;
  1607. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1608. switch ((insn >> 22) & 3) {
  1609. case 0:
  1610. gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
  1611. break;
  1612. case 1:
  1613. gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
  1614. break;
  1615. case 2:
  1616. gen_op_iwmmxt_unpackll_M0_wRn(rd1);
  1617. break;
  1618. case 3:
  1619. return 1;
  1620. }
  1621. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1622. gen_op_iwmmxt_set_mup();
  1623. gen_op_iwmmxt_set_cup();
  1624. break;
  1625. case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
  1626. wrd = (insn >> 12) & 0xf;
  1627. rd0 = (insn >> 16) & 0xf;
  1628. rd1 = (insn >> 0) & 0xf;
  1629. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1630. switch ((insn >> 22) & 3) {
  1631. case 0:
  1632. gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
  1633. break;
  1634. case 1:
  1635. gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
  1636. break;
  1637. case 2:
  1638. gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
  1639. break;
  1640. case 3:
  1641. return 1;
  1642. }
  1643. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1644. gen_op_iwmmxt_set_mup();
  1645. gen_op_iwmmxt_set_cup();
  1646. break;
  1647. case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
  1648. wrd = (insn >> 12) & 0xf;
  1649. rd0 = (insn >> 16) & 0xf;
  1650. rd1 = (insn >> 0) & 0xf;
  1651. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1652. if (insn & (1 << 22))
  1653. gen_op_iwmmxt_sadw_M0_wRn(rd1);
  1654. else
  1655. gen_op_iwmmxt_sadb_M0_wRn(rd1);
  1656. if (!(insn & (1 << 20)))
  1657. gen_op_iwmmxt_addl_M0_wRn(wrd);
  1658. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1659. gen_op_iwmmxt_set_mup();
  1660. break;
  1661. case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
  1662. wrd = (insn >> 12) & 0xf;
  1663. rd0 = (insn >> 16) & 0xf;
  1664. rd1 = (insn >> 0) & 0xf;
  1665. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1666. if (insn & (1 << 21)) {
  1667. if (insn & (1 << 20))
  1668. gen_op_iwmmxt_mulshw_M0_wRn(rd1);
  1669. else
  1670. gen_op_iwmmxt_mulslw_M0_wRn(rd1);
  1671. } else {
  1672. if (insn & (1 << 20))
  1673. gen_op_iwmmxt_muluhw_M0_wRn(rd1);
  1674. else
  1675. gen_op_iwmmxt_mululw_M0_wRn(rd1);
  1676. }
  1677. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1678. gen_op_iwmmxt_set_mup();
  1679. break;
  1680. case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
  1681. wrd = (insn >> 12) & 0xf;
  1682. rd0 = (insn >> 16) & 0xf;
  1683. rd1 = (insn >> 0) & 0xf;
  1684. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1685. if (insn & (1 << 21))
  1686. gen_op_iwmmxt_macsw_M0_wRn(rd1);
  1687. else
  1688. gen_op_iwmmxt_macuw_M0_wRn(rd1);
  1689. if (!(insn & (1 << 20))) {
  1690. iwmmxt_load_reg(cpu_V1, wrd);
  1691. tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
  1692. }
  1693. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1694. gen_op_iwmmxt_set_mup();
  1695. break;
  1696. case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
  1697. wrd = (insn >> 12) & 0xf;
  1698. rd0 = (insn >> 16) & 0xf;
  1699. rd1 = (insn >> 0) & 0xf;
  1700. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1701. switch ((insn >> 22) & 3) {
  1702. case 0:
  1703. gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
  1704. break;
  1705. case 1:
  1706. gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
  1707. break;
  1708. case 2:
  1709. gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
  1710. break;
  1711. case 3:
  1712. return 1;
  1713. }
  1714. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1715. gen_op_iwmmxt_set_mup();
  1716. gen_op_iwmmxt_set_cup();
  1717. break;
  1718. case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
  1719. wrd = (insn >> 12) & 0xf;
  1720. rd0 = (insn >> 16) & 0xf;
  1721. rd1 = (insn >> 0) & 0xf;
  1722. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1723. if (insn & (1 << 22)) {
  1724. if (insn & (1 << 20))
  1725. gen_op_iwmmxt_avgw1_M0_wRn(rd1);
  1726. else
  1727. gen_op_iwmmxt_avgw0_M0_wRn(rd1);
  1728. } else {
  1729. if (insn & (1 << 20))
  1730. gen_op_iwmmxt_avgb1_M0_wRn(rd1);
  1731. else
  1732. gen_op_iwmmxt_avgb0_M0_wRn(rd1);
  1733. }
  1734. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1735. gen_op_iwmmxt_set_mup();
  1736. gen_op_iwmmxt_set_cup();
  1737. break;
  1738. case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
  1739. wrd = (insn >> 12) & 0xf;
  1740. rd0 = (insn >> 16) & 0xf;
  1741. rd1 = (insn >> 0) & 0xf;
  1742. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1743. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
  1744. tcg_gen_andi_i32(tmp, tmp, 7);
  1745. iwmmxt_load_reg(cpu_V1, rd1);
  1746. gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
  1747. tcg_temp_free_i32(tmp);
  1748. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1749. gen_op_iwmmxt_set_mup();
  1750. break;
  1751. case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
  1752. if (((insn >> 6) & 3) == 3)
  1753. return 1;
  1754. rd = (insn >> 12) & 0xf;
  1755. wrd = (insn >> 16) & 0xf;
  1756. tmp = load_reg(s, rd);
  1757. gen_op_iwmmxt_movq_M0_wRn(wrd);
  1758. switch ((insn >> 6) & 3) {
  1759. case 0:
  1760. tmp2 = tcg_const_i32(0xff);
  1761. tmp3 = tcg_const_i32((insn & 7) << 3);
  1762. break;
  1763. case 1:
  1764. tmp2 = tcg_const_i32(0xffff);
  1765. tmp3 = tcg_const_i32((insn & 3) << 4);
  1766. break;
  1767. case 2:
  1768. tmp2 = tcg_const_i32(0xffffffff);
  1769. tmp3 = tcg_const_i32((insn & 1) << 5);
  1770. break;
  1771. default:
  1772. TCGV_UNUSED_I32(tmp2);
  1773. TCGV_UNUSED_I32(tmp3);
  1774. }
  1775. gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
  1776. tcg_temp_free_i32(tmp3);
  1777. tcg_temp_free_i32(tmp2);
  1778. tcg_temp_free_i32(tmp);
  1779. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1780. gen_op_iwmmxt_set_mup();
  1781. break;
  1782. case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
  1783. rd = (insn >> 12) & 0xf;
  1784. wrd = (insn >> 16) & 0xf;
  1785. if (rd == 15 || ((insn >> 22) & 3) == 3)
  1786. return 1;
  1787. gen_op_iwmmxt_movq_M0_wRn(wrd);
  1788. tmp = tcg_temp_new_i32();
  1789. switch ((insn >> 22) & 3) {
  1790. case 0:
  1791. tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
  1792. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1793. if (insn & 8) {
  1794. tcg_gen_ext8s_i32(tmp, tmp);
  1795. } else {
  1796. tcg_gen_andi_i32(tmp, tmp, 0xff);
  1797. }
  1798. break;
  1799. case 1:
  1800. tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
  1801. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1802. if (insn & 8) {
  1803. tcg_gen_ext16s_i32(tmp, tmp);
  1804. } else {
  1805. tcg_gen_andi_i32(tmp, tmp, 0xffff);
  1806. }
  1807. break;
  1808. case 2:
  1809. tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
  1810. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1811. break;
  1812. }
  1813. store_reg(s, rd, tmp);
  1814. break;
  1815. case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
  1816. if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
  1817. return 1;
  1818. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1819. switch ((insn >> 22) & 3) {
  1820. case 0:
  1821. tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
  1822. break;
  1823. case 1:
  1824. tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
  1825. break;
  1826. case 2:
  1827. tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
  1828. break;
  1829. }
  1830. tcg_gen_shli_i32(tmp, tmp, 28);
  1831. gen_set_nzcv(tmp);
  1832. tcg_temp_free_i32(tmp);
  1833. break;
  1834. case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
  1835. if (((insn >> 6) & 3) == 3)
  1836. return 1;
  1837. rd = (insn >> 12) & 0xf;
  1838. wrd = (insn >> 16) & 0xf;
  1839. tmp = load_reg(s, rd);
  1840. switch ((insn >> 6) & 3) {
  1841. case 0:
  1842. gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
  1843. break;
  1844. case 1:
  1845. gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
  1846. break;
  1847. case 2:
  1848. gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
  1849. break;
  1850. }
  1851. tcg_temp_free_i32(tmp);
  1852. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1853. gen_op_iwmmxt_set_mup();
  1854. break;
  1855. case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
  1856. if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
  1857. return 1;
  1858. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1859. tmp2 = tcg_temp_new_i32();
  1860. tcg_gen_mov_i32(tmp2, tmp);
  1861. switch ((insn >> 22) & 3) {
  1862. case 0:
  1863. for (i = 0; i < 7; i ++) {
  1864. tcg_gen_shli_i32(tmp2, tmp2, 4);
  1865. tcg_gen_and_i32(tmp, tmp, tmp2);
  1866. }
  1867. break;
  1868. case 1:
  1869. for (i = 0; i < 3; i ++) {
  1870. tcg_gen_shli_i32(tmp2, tmp2, 8);
  1871. tcg_gen_and_i32(tmp, tmp, tmp2);
  1872. }
  1873. break;
  1874. case 2:
  1875. tcg_gen_shli_i32(tmp2, tmp2, 16);
  1876. tcg_gen_and_i32(tmp, tmp, tmp2);
  1877. break;
  1878. }
  1879. gen_set_nzcv(tmp);
  1880. tcg_temp_free_i32(tmp2);
  1881. tcg_temp_free_i32(tmp);
  1882. break;
  1883. case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
  1884. wrd = (insn >> 12) & 0xf;
  1885. rd0 = (insn >> 16) & 0xf;
  1886. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1887. switch ((insn >> 22) & 3) {
  1888. case 0:
  1889. gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
  1890. break;
  1891. case 1:
  1892. gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
  1893. break;
  1894. case 2:
  1895. gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
  1896. break;
  1897. case 3:
  1898. return 1;
  1899. }
  1900. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1901. gen_op_iwmmxt_set_mup();
  1902. break;
  1903. case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
  1904. if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
  1905. return 1;
  1906. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1907. tmp2 = tcg_temp_new_i32();
  1908. tcg_gen_mov_i32(tmp2, tmp);
  1909. switch ((insn >> 22) & 3) {
  1910. case 0:
  1911. for (i = 0; i < 7; i ++) {
  1912. tcg_gen_shli_i32(tmp2, tmp2, 4);
  1913. tcg_gen_or_i32(tmp, tmp, tmp2);
  1914. }
  1915. break;
  1916. case 1:
  1917. for (i = 0; i < 3; i ++) {
  1918. tcg_gen_shli_i32(tmp2, tmp2, 8);
  1919. tcg_gen_or_i32(tmp, tmp, tmp2);
  1920. }
  1921. break;
  1922. case 2:
  1923. tcg_gen_shli_i32(tmp2, tmp2, 16);
  1924. tcg_gen_or_i32(tmp, tmp, tmp2);
  1925. break;
  1926. }
  1927. gen_set_nzcv(tmp);
  1928. tcg_temp_free_i32(tmp2);
  1929. tcg_temp_free_i32(tmp);
  1930. break;
  1931. case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
  1932. rd = (insn >> 12) & 0xf;
  1933. rd0 = (insn >> 16) & 0xf;
  1934. if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
  1935. return 1;
  1936. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1937. tmp = tcg_temp_new_i32();
  1938. switch ((insn >> 22) & 3) {
  1939. case 0:
  1940. gen_helper_iwmmxt_msbb(tmp, cpu_M0);
  1941. break;
  1942. case 1:
  1943. gen_helper_iwmmxt_msbw(tmp, cpu_M0);
  1944. break;
  1945. case 2:
  1946. gen_helper_iwmmxt_msbl(tmp, cpu_M0);
  1947. break;
  1948. }
  1949. store_reg(s, rd, tmp);
  1950. break;
  1951. case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
  1952. case 0x906: case 0xb06: case 0xd06: case 0xf06:
  1953. wrd = (insn >> 12) & 0xf;
  1954. rd0 = (insn >> 16) & 0xf;
  1955. rd1 = (insn >> 0) & 0xf;
  1956. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1957. switch ((insn >> 22) & 3) {
  1958. case 0:
  1959. if (insn & (1 << 21))
  1960. gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
  1961. else
  1962. gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
  1963. break;
  1964. case 1:
  1965. if (insn & (1 << 21))
  1966. gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
  1967. else
  1968. gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
  1969. break;
  1970. case 2:
  1971. if (insn & (1 << 21))
  1972. gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
  1973. else
  1974. gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
  1975. break;
  1976. case 3:
  1977. return 1;
  1978. }
  1979. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1980. gen_op_iwmmxt_set_mup();
  1981. gen_op_iwmmxt_set_cup();
  1982. break;
  1983. case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
  1984. case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
  1985. wrd = (insn >> 12) & 0xf;
  1986. rd0 = (insn >> 16) & 0xf;
  1987. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1988. switch ((insn >> 22) & 3) {
  1989. case 0:
  1990. if (insn & (1 << 21))
  1991. gen_op_iwmmxt_unpacklsb_M0();
  1992. else
  1993. gen_op_iwmmxt_unpacklub_M0();
  1994. break;
  1995. case 1:
  1996. if (insn & (1 << 21))
  1997. gen_op_iwmmxt_unpacklsw_M0();
  1998. else
  1999. gen_op_iwmmxt_unpackluw_M0();
  2000. break;
  2001. case 2:
  2002. if (insn & (1 << 21))
  2003. gen_op_iwmmxt_unpacklsl_M0();
  2004. else
  2005. gen_op_iwmmxt_unpacklul_M0();
  2006. break;
  2007. case 3:
  2008. return 1;
  2009. }
  2010. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2011. gen_op_iwmmxt_set_mup();
  2012. gen_op_iwmmxt_set_cup();
  2013. break;
  2014. case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
  2015. case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
  2016. wrd = (insn >> 12) & 0xf;
  2017. rd0 = (insn >> 16) & 0xf;
  2018. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2019. switch ((insn >> 22) & 3) {
  2020. case 0:
  2021. if (insn & (1 << 21))
  2022. gen_op_iwmmxt_unpackhsb_M0();
  2023. else
  2024. gen_op_iwmmxt_unpackhub_M0();
  2025. break;
  2026. case 1:
  2027. if (insn & (1 << 21))
  2028. gen_op_iwmmxt_unpackhsw_M0();
  2029. else
  2030. gen_op_iwmmxt_unpackhuw_M0();
  2031. break;
  2032. case 2:
  2033. if (insn & (1 << 21))
  2034. gen_op_iwmmxt_unpackhsl_M0();
  2035. else
  2036. gen_op_iwmmxt_unpackhul_M0();
  2037. break;
  2038. case 3:
  2039. return 1;
  2040. }
  2041. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2042. gen_op_iwmmxt_set_mup();
  2043. gen_op_iwmmxt_set_cup();
  2044. break;
  2045. case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
  2046. case 0x214: case 0x614: case 0xa14: case 0xe14:
  2047. if (((insn >> 22) & 3) == 0)
  2048. return 1;
  2049. wrd = (insn >> 12) & 0xf;
  2050. rd0 = (insn >> 16) & 0xf;
  2051. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2052. tmp = tcg_temp_new_i32();
  2053. if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  2054. tcg_temp_free_i32(tmp);
  2055. return 1;
  2056. }
  2057. switch ((insn >> 22) & 3) {
  2058. case 1:
  2059. gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
  2060. break;
  2061. case 2:
  2062. gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
  2063. break;
  2064. case 3:
  2065. gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
  2066. break;
  2067. }
  2068. tcg_temp_free_i32(tmp);
  2069. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2070. gen_op_iwmmxt_set_mup();
  2071. gen_op_iwmmxt_set_cup();
  2072. break;
  2073. case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
  2074. case 0x014: case 0x414: case 0x814: case 0xc14:
  2075. if (((insn >> 22) & 3) == 0)
  2076. return 1;
  2077. wrd = (insn >> 12) & 0xf;
  2078. rd0 = (insn >> 16) & 0xf;
  2079. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2080. tmp = tcg_temp_new_i32();
  2081. if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  2082. tcg_temp_free_i32(tmp);
  2083. return 1;
  2084. }
  2085. switch ((insn >> 22) & 3) {
  2086. case 1:
  2087. gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
  2088. break;
  2089. case 2:
  2090. gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
  2091. break;
  2092. case 3:
  2093. gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
  2094. break;
  2095. }
  2096. tcg_temp_free_i32(tmp);
  2097. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2098. gen_op_iwmmxt_set_mup();
  2099. gen_op_iwmmxt_set_cup();
  2100. break;
  2101. case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
  2102. case 0x114: case 0x514: case 0x914: case 0xd14:
  2103. if (((insn >> 22) & 3) == 0)
  2104. return 1;
  2105. wrd = (insn >> 12) & 0xf;
  2106. rd0 = (insn >> 16) & 0xf;
  2107. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2108. tmp = tcg_temp_new_i32();
  2109. if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  2110. tcg_temp_free_i32(tmp);
  2111. return 1;
  2112. }
  2113. switch ((insn >> 22) & 3) {
  2114. case 1:
  2115. gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
  2116. break;
  2117. case 2:
  2118. gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
  2119. break;
  2120. case 3:
  2121. gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
  2122. break;
  2123. }
  2124. tcg_temp_free_i32(tmp);
  2125. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2126. gen_op_iwmmxt_set_mup();
  2127. gen_op_iwmmxt_set_cup();
  2128. break;
  2129. case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
  2130. case 0x314: case 0x714: case 0xb14: case 0xf14:
  2131. if (((insn >> 22) & 3) == 0)
  2132. return 1;
  2133. wrd = (insn >> 12) & 0xf;
  2134. rd0 = (insn >> 16) & 0xf;
  2135. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2136. tmp = tcg_temp_new_i32();
  2137. switch ((insn >> 22) & 3) {
  2138. case 1:
  2139. if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
  2140. tcg_temp_free_i32(tmp);
  2141. return 1;
  2142. }
  2143. gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
  2144. break;
  2145. case 2:
  2146. if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
  2147. tcg_temp_free_i32(tmp);
  2148. return 1;
  2149. }
  2150. gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
  2151. break;
  2152. case 3:
  2153. if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
  2154. tcg_temp_free_i32(tmp);
  2155. return 1;
  2156. }
  2157. gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
  2158. break;
  2159. }
  2160. tcg_temp_free_i32(tmp);
  2161. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2162. gen_op_iwmmxt_set_mup();
  2163. gen_op_iwmmxt_set_cup();
  2164. break;
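/* Shift/rotate by register or immediate: WSRL/WSRA/WSLL above pass a 0xff
 * mask to gen_iwmmxt_shift(), while WROR bounds the rotate count to the
 * element size (0xf/0x1f/0x3f for halfword/word/doubleword elements).
 * In all cases a nonzero return from gen_iwmmxt_shift() means the
 * shift-operand encoding is invalid and the instruction UNDEFs. */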
  2165. case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
  2166. case 0x916: case 0xb16: case 0xd16: case 0xf16:
  2167. wrd = (insn >> 12) & 0xf;
  2168. rd0 = (insn >> 16) & 0xf;
  2169. rd1 = (insn >> 0) & 0xf;
  2170. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2171. switch ((insn >> 22) & 3) {
  2172. case 0:
  2173. if (insn & (1 << 21))
  2174. gen_op_iwmmxt_minsb_M0_wRn(rd1);
  2175. else
  2176. gen_op_iwmmxt_minub_M0_wRn(rd1);
  2177. break;
  2178. case 1:
  2179. if (insn & (1 << 21))
  2180. gen_op_iwmmxt_minsw_M0_wRn(rd1);
  2181. else
  2182. gen_op_iwmmxt_minuw_M0_wRn(rd1);
  2183. break;
  2184. case 2:
  2185. if (insn & (1 << 21))
  2186. gen_op_iwmmxt_minsl_M0_wRn(rd1);
  2187. else
  2188. gen_op_iwmmxt_minul_M0_wRn(rd1);
  2189. break;
  2190. case 3:
  2191. return 1;
  2192. }
  2193. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2194. gen_op_iwmmxt_set_mup();
  2195. break;
  2196. case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
  2197. case 0x816: case 0xa16: case 0xc16: case 0xe16:
  2198. wrd = (insn >> 12) & 0xf;
  2199. rd0 = (insn >> 16) & 0xf;
  2200. rd1 = (insn >> 0) & 0xf;
  2201. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2202. switch ((insn >> 22) & 3) {
  2203. case 0:
  2204. if (insn & (1 << 21))
  2205. gen_op_iwmmxt_maxsb_M0_wRn(rd1);
  2206. else
  2207. gen_op_iwmmxt_maxub_M0_wRn(rd1);
  2208. break;
  2209. case 1:
  2210. if (insn & (1 << 21))
  2211. gen_op_iwmmxt_maxsw_M0_wRn(rd1);
  2212. else
  2213. gen_op_iwmmxt_maxuw_M0_wRn(rd1);
  2214. break;
  2215. case 2:
  2216. if (insn & (1 << 21))
  2217. gen_op_iwmmxt_maxsl_M0_wRn(rd1);
  2218. else
  2219. gen_op_iwmmxt_maxul_M0_wRn(rd1);
  2220. break;
  2221. case 3:
  2222. return 1;
  2223. }
  2224. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2225. gen_op_iwmmxt_set_mup();
  2226. break;
  2227. case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
  2228. case 0x402: case 0x502: case 0x602: case 0x702:
  2229. wrd = (insn >> 12) & 0xf;
  2230. rd0 = (insn >> 16) & 0xf;
  2231. rd1 = (insn >> 0) & 0xf;
  2232. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2233. tmp = tcg_const_i32((insn >> 20) & 3);
  2234. iwmmxt_load_reg(cpu_V1, rd1);
  2235. gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
  2236. tcg_temp_free_i32(tmp);
  2237. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2238. gen_op_iwmmxt_set_mup();
  2239. break;
  2240. case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
  2241. case 0x41a: case 0x51a: case 0x61a: case 0x71a:
  2242. case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
  2243. case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
  2244. wrd = (insn >> 12) & 0xf;
  2245. rd0 = (insn >> 16) & 0xf;
  2246. rd1 = (insn >> 0) & 0xf;
  2247. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2248. switch ((insn >> 20) & 0xf) {
  2249. case 0x0:
  2250. gen_op_iwmmxt_subnb_M0_wRn(rd1);
  2251. break;
  2252. case 0x1:
  2253. gen_op_iwmmxt_subub_M0_wRn(rd1);
  2254. break;
  2255. case 0x3:
  2256. gen_op_iwmmxt_subsb_M0_wRn(rd1);
  2257. break;
  2258. case 0x4:
  2259. gen_op_iwmmxt_subnw_M0_wRn(rd1);
  2260. break;
  2261. case 0x5:
  2262. gen_op_iwmmxt_subuw_M0_wRn(rd1);
  2263. break;
  2264. case 0x7:
  2265. gen_op_iwmmxt_subsw_M0_wRn(rd1);
  2266. break;
  2267. case 0x8:
  2268. gen_op_iwmmxt_subnl_M0_wRn(rd1);
  2269. break;
  2270. case 0x9:
  2271. gen_op_iwmmxt_subul_M0_wRn(rd1);
  2272. break;
  2273. case 0xb:
  2274. gen_op_iwmmxt_subsl_M0_wRn(rd1);
  2275. break;
  2276. default:
  2277. return 1;
  2278. }
  2279. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2280. gen_op_iwmmxt_set_mup();
  2281. gen_op_iwmmxt_set_cup();
  2282. break;
  2283. case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
  2284. case 0x41e: case 0x51e: case 0x61e: case 0x71e:
  2285. case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
  2286. case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
  2287. wrd = (insn >> 12) & 0xf;
  2288. rd0 = (insn >> 16) & 0xf;
  2289. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2290. tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
  2291. gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
  2292. tcg_temp_free_i32(tmp);
  2293. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2294. gen_op_iwmmxt_set_mup();
  2295. gen_op_iwmmxt_set_cup();
  2296. break;
  2297. case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
  2298. case 0x418: case 0x518: case 0x618: case 0x718:
  2299. case 0x818: case 0x918: case 0xa18: case 0xb18:
  2300. case 0xc18: case 0xd18: case 0xe18: case 0xf18:
  2301. wrd = (insn >> 12) & 0xf;
  2302. rd0 = (insn >> 16) & 0xf;
  2303. rd1 = (insn >> 0) & 0xf;
  2304. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2305. switch ((insn >> 20) & 0xf) {
  2306. case 0x0:
  2307. gen_op_iwmmxt_addnb_M0_wRn(rd1);
  2308. break;
  2309. case 0x1:
  2310. gen_op_iwmmxt_addub_M0_wRn(rd1);
  2311. break;
  2312. case 0x3:
  2313. gen_op_iwmmxt_addsb_M0_wRn(rd1);
  2314. break;
  2315. case 0x4:
  2316. gen_op_iwmmxt_addnw_M0_wRn(rd1);
  2317. break;
  2318. case 0x5:
  2319. gen_op_iwmmxt_adduw_M0_wRn(rd1);
  2320. break;
  2321. case 0x7:
  2322. gen_op_iwmmxt_addsw_M0_wRn(rd1);
  2323. break;
  2324. case 0x8:
  2325. gen_op_iwmmxt_addnl_M0_wRn(rd1);
  2326. break;
  2327. case 0x9:
  2328. gen_op_iwmmxt_addul_M0_wRn(rd1);
  2329. break;
  2330. case 0xb:
  2331. gen_op_iwmmxt_addsl_M0_wRn(rd1);
  2332. break;
  2333. default:
  2334. return 1;
  2335. }
  2336. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2337. gen_op_iwmmxt_set_mup();
  2338. gen_op_iwmmxt_set_cup();
  2339. break;
  2340. case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
  2341. case 0x408: case 0x508: case 0x608: case 0x708:
  2342. case 0x808: case 0x908: case 0xa08: case 0xb08:
  2343. case 0xc08: case 0xd08: case 0xe08: case 0xf08:
  2344. if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
  2345. return 1;
  2346. wrd = (insn >> 12) & 0xf;
  2347. rd0 = (insn >> 16) & 0xf;
  2348. rd1 = (insn >> 0) & 0xf;
  2349. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2350. switch ((insn >> 22) & 3) {
  2351. case 1:
  2352. if (insn & (1 << 21))
  2353. gen_op_iwmmxt_packsw_M0_wRn(rd1);
  2354. else
  2355. gen_op_iwmmxt_packuw_M0_wRn(rd1);
  2356. break;
  2357. case 2:
  2358. if (insn & (1 << 21))
  2359. gen_op_iwmmxt_packsl_M0_wRn(rd1);
  2360. else
  2361. gen_op_iwmmxt_packul_M0_wRn(rd1);
  2362. break;
  2363. case 3:
  2364. if (insn & (1 << 21))
  2365. gen_op_iwmmxt_packsq_M0_wRn(rd1);
  2366. else
  2367. gen_op_iwmmxt_packuq_M0_wRn(rd1);
  2368. break;
  2369. }
  2370. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2371. gen_op_iwmmxt_set_mup();
  2372. gen_op_iwmmxt_set_cup();
  2373. break;
  2374. case 0x201: case 0x203: case 0x205: case 0x207:
  2375. case 0x209: case 0x20b: case 0x20d: case 0x20f:
  2376. case 0x211: case 0x213: case 0x215: case 0x217:
  2377. case 0x219: case 0x21b: case 0x21d: case 0x21f:
  2378. wrd = (insn >> 5) & 0xf;
  2379. rd0 = (insn >> 12) & 0xf;
  2380. rd1 = (insn >> 0) & 0xf;
  2381. if (rd0 == 0xf || rd1 == 0xf)
  2382. return 1;
  2383. gen_op_iwmmxt_movq_M0_wRn(wrd);
  2384. tmp = load_reg(s, rd0);
  2385. tmp2 = load_reg(s, rd1);
  2386. switch ((insn >> 16) & 0xf) {
  2387. case 0x0: /* TMIA */
  2388. gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
  2389. break;
  2390. case 0x8: /* TMIAPH */
  2391. gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
  2392. break;
  2393. case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
  2394. if (insn & (1 << 16))
  2395. tcg_gen_shri_i32(tmp, tmp, 16);
  2396. if (insn & (1 << 17))
  2397. tcg_gen_shri_i32(tmp2, tmp2, 16);
  2398. gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
  2399. break;
  2400. default:
  2401. tcg_temp_free_i32(tmp2);
  2402. tcg_temp_free_i32(tmp);
  2403. return 1;
  2404. }
  2405. tcg_temp_free_i32(tmp2);
  2406. tcg_temp_free_i32(tmp);
  2407. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2408. gen_op_iwmmxt_set_mup();
  2409. break;
  2410. default:
  2411. return 1;
  2412. }
  2413. return 0;
  2414. }
  2415. /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2416. (i.e. an undefined instruction). */
  2417. static int disas_dsp_insn(DisasContext *s, uint32_t insn)
  2418. {
  2419. int acc, rd0, rd1, rdhi, rdlo;
  2420. TCGv_i32 tmp, tmp2;
  2421. if ((insn & 0x0ff00f10) == 0x0e200010) {
  2422. /* Multiply with Internal Accumulate Format */
  2423. rd0 = (insn >> 12) & 0xf;
  2424. rd1 = insn & 0xf;
  2425. acc = (insn >> 5) & 7;
  2426. if (acc != 0)
  2427. return 1;
  2428. tmp = load_reg(s, rd0);
  2429. tmp2 = load_reg(s, rd1);
  2430. switch ((insn >> 16) & 0xf) {
  2431. case 0x0: /* MIA */
  2432. gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
  2433. break;
  2434. case 0x8: /* MIAPH */
  2435. gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
  2436. break;
  2437. case 0xc: /* MIABB */
  2438. case 0xd: /* MIABT */
  2439. case 0xe: /* MIATB */
  2440. case 0xf: /* MIATT */
  2441. if (insn & (1 << 16))
  2442. tcg_gen_shri_i32(tmp, tmp, 16);
  2443. if (insn & (1 << 17))
  2444. tcg_gen_shri_i32(tmp2, tmp2, 16);
  2445. gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
  2446. break;
  2447. default:
  2448. return 1;
  2449. }
  2450. tcg_temp_free_i32(tmp2);
  2451. tcg_temp_free_i32(tmp);
  2452. gen_op_iwmmxt_movq_wRn_M0(acc);
  2453. return 0;
  2454. }
  2455. if ((insn & 0x0fe00ff8) == 0x0c400000) {
  2456. /* Internal Accumulator Access Format */
  2457. rdhi = (insn >> 16) & 0xf;
  2458. rdlo = (insn >> 12) & 0xf;
  2459. acc = insn & 7;
  2460. if (acc != 0)
  2461. return 1;
  2462. if (insn & ARM_CP_RW_BIT) { /* MRA */
  2463. iwmmxt_load_reg(cpu_V0, acc);
  2464. tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
  2465. tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
  2466. tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
  2467. tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
  2468. } else { /* MAR */
  2469. tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
  2470. iwmmxt_store_reg(cpu_V0, acc);
  2471. }
  2472. return 0;
  2473. }
  2474. return 1;
  2475. }
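/* The XScale DSP accumulator acc0 is 40 bits wide and is stored via the
 * iwMMXt register helpers. MRA therefore splits it across two core
 * registers, masking RdHi with (1 << (40 - 32)) - 1 == 0xff so that only
 * bits [39:32] are visible; MAR concatenates RdLo:RdHi back into the
 * 64-bit storage. */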
  2476. #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
  2477. #define VFP_SREG(insn, bigbit, smallbit) \
  2478. ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
  2479. #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
  2480. if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
  2481. reg = (((insn) >> (bigbit)) & 0x0f) \
  2482. | (((insn) >> ((smallbit) - 4)) & 0x10); \
  2483. } else { \
  2484. if (insn & (1 << (smallbit))) \
  2485. return 1; \
  2486. reg = ((insn) >> (bigbit)) & 0x0f; \
  2487. }} while (0)
  2488. #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
  2489. #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
  2490. #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
  2491. #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
  2492. #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
  2493. #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
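/* Worked example: VFP_DREG_D uses bigbit=12, smallbit=22. With VFP3
 * (32 double registers) the D register index is insn[15:12] | (insn[22] << 4),
 * giving D0..D31; without VFP3, insn[22] must be zero (only D0..D15 exist)
 * or the instruction UNDEFs. VFP_SREG_D builds the S register index as
 * (insn[15:12] << 1) | insn[22], i.e. the 4-bit field supplies bits [4:1]
 * and the "small" bit is bit 0. */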
  2494. /* Move between integer and VFP cores. */
  2495. static TCGv_i32 gen_vfp_mrs(void)
  2496. {
  2497. TCGv_i32 tmp = tcg_temp_new_i32();
  2498. tcg_gen_mov_i32(tmp, cpu_F0s);
  2499. return tmp;
  2500. }
  2501. static void gen_vfp_msr(TCGv_i32 tmp)
  2502. {
  2503. tcg_gen_mov_i32(cpu_F0s, tmp);
  2504. tcg_temp_free_i32(tmp);
  2505. }
  2506. static void gen_neon_dup_u8(TCGv_i32 var, int shift)
  2507. {
  2508. TCGv_i32 tmp = tcg_temp_new_i32();
  2509. if (shift)
  2510. tcg_gen_shri_i32(var, var, shift);
  2511. tcg_gen_ext8u_i32(var, var);
  2512. tcg_gen_shli_i32(tmp, var, 8);
  2513. tcg_gen_or_i32(var, var, tmp);
  2514. tcg_gen_shli_i32(tmp, var, 16);
  2515. tcg_gen_or_i32(var, var, tmp);
  2516. tcg_temp_free_i32(tmp);
  2517. }
  2518. static void gen_neon_dup_low16(TCGv_i32 var)
  2519. {
  2520. TCGv_i32 tmp = tcg_temp_new_i32();
  2521. tcg_gen_ext16u_i32(var, var);
  2522. tcg_gen_shli_i32(tmp, var, 16);
  2523. tcg_gen_or_i32(var, var, tmp);
  2524. tcg_temp_free_i32(tmp);
  2525. }
  2526. static void gen_neon_dup_high16(TCGv_i32 var)
  2527. {
  2528. TCGv_i32 tmp = tcg_temp_new_i32();
  2529. tcg_gen_andi_i32(var, var, 0xffff0000);
  2530. tcg_gen_shri_i32(tmp, var, 16);
  2531. tcg_gen_or_i32(var, var, tmp);
  2532. tcg_temp_free_i32(tmp);
  2533. }
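/* These helpers replicate a byte or halfword across all 32 bits, e.g.
 * gen_neon_dup_u8:     0x000000ab -> 0xabababab
 * gen_neon_dup_low16:  0x1234abcd -> 0xabcdabcd
 * gen_neon_dup_high16: 0x1234abcd -> 0x12341234 */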
  2534. static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
  2535. {
2536. /* Load a single Neon element and replicate into a 32-bit TCG reg */
  2537. TCGv_i32 tmp = tcg_temp_new_i32();
  2538. switch (size) {
  2539. case 0:
  2540. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  2541. gen_neon_dup_u8(tmp, 0);
  2542. break;
  2543. case 1:
  2544. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  2545. gen_neon_dup_low16(tmp);
  2546. break;
  2547. case 2:
  2548. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  2549. break;
  2550. default: /* Avoid compiler warnings. */
  2551. abort();
  2552. }
  2553. return tmp;
  2554. }
  2555. static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
  2556. uint32_t dp)
  2557. {
  2558. uint32_t cc = extract32(insn, 20, 2);
  2559. if (dp) {
  2560. TCGv_i64 frn, frm, dest;
  2561. TCGv_i64 tmp, zero, zf, nf, vf;
  2562. zero = tcg_const_i64(0);
  2563. frn = tcg_temp_new_i64();
  2564. frm = tcg_temp_new_i64();
  2565. dest = tcg_temp_new_i64();
  2566. zf = tcg_temp_new_i64();
  2567. nf = tcg_temp_new_i64();
  2568. vf = tcg_temp_new_i64();
  2569. tcg_gen_extu_i32_i64(zf, cpu_ZF);
  2570. tcg_gen_ext_i32_i64(nf, cpu_NF);
  2571. tcg_gen_ext_i32_i64(vf, cpu_VF);
  2572. tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
  2573. tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
  2574. switch (cc) {
  2575. case 0: /* eq: Z */
  2576. tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
  2577. frn, frm);
  2578. break;
  2579. case 1: /* vs: V */
  2580. tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
  2581. frn, frm);
  2582. break;
  2583. case 2: /* ge: N == V -> N ^ V == 0 */
  2584. tmp = tcg_temp_new_i64();
  2585. tcg_gen_xor_i64(tmp, vf, nf);
  2586. tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
  2587. frn, frm);
  2588. tcg_temp_free_i64(tmp);
  2589. break;
  2590. case 3: /* gt: !Z && N == V */
  2591. tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
  2592. frn, frm);
  2593. tmp = tcg_temp_new_i64();
  2594. tcg_gen_xor_i64(tmp, vf, nf);
  2595. tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
  2596. dest, frm);
  2597. tcg_temp_free_i64(tmp);
  2598. break;
  2599. }
  2600. tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
  2601. tcg_temp_free_i64(frn);
  2602. tcg_temp_free_i64(frm);
  2603. tcg_temp_free_i64(dest);
  2604. tcg_temp_free_i64(zf);
  2605. tcg_temp_free_i64(nf);
  2606. tcg_temp_free_i64(vf);
  2607. tcg_temp_free_i64(zero);
  2608. } else {
  2609. TCGv_i32 frn, frm, dest;
  2610. TCGv_i32 tmp, zero;
  2611. zero = tcg_const_i32(0);
  2612. frn = tcg_temp_new_i32();
  2613. frm = tcg_temp_new_i32();
  2614. dest = tcg_temp_new_i32();
  2615. tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
  2616. tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
  2617. switch (cc) {
  2618. case 0: /* eq: Z */
  2619. tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
  2620. frn, frm);
  2621. break;
  2622. case 1: /* vs: V */
  2623. tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
  2624. frn, frm);
  2625. break;
  2626. case 2: /* ge: N == V -> N ^ V == 0 */
  2627. tmp = tcg_temp_new_i32();
  2628. tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
  2629. tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
  2630. frn, frm);
  2631. tcg_temp_free_i32(tmp);
  2632. break;
  2633. case 3: /* gt: !Z && N == V */
  2634. tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
  2635. frn, frm);
  2636. tmp = tcg_temp_new_i32();
  2637. tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
  2638. tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
  2639. dest, frm);
  2640. tcg_temp_free_i32(tmp);
  2641. break;
  2642. }
  2643. tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
  2644. tcg_temp_free_i32(frn);
  2645. tcg_temp_free_i32(frm);
  2646. tcg_temp_free_i32(dest);
  2647. tcg_temp_free_i32(zero);
  2648. }
  2649. return 0;
  2650. }
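/* VSEL: dest = cond ? Sn/Dn : Sm/Dm, evaluated with movcond on the cached
 * flag variables (cpu_ZF is zero when Z is set; cpu_NF/cpu_VF keep their
 * flag in the sign bit). Only eq/vs/ge/gt are encodable; the inverse
 * conditions simply select the other operand. The gt case needs two
 * movconds: !Z picks frn first, then N != V overrides the result with frm. */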
  2651. static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
  2652. uint32_t rm, uint32_t dp)
  2653. {
  2654. uint32_t vmin = extract32(insn, 6, 1);
  2655. TCGv_ptr fpst = get_fpstatus_ptr(0);
  2656. if (dp) {
  2657. TCGv_i64 frn, frm, dest;
  2658. frn = tcg_temp_new_i64();
  2659. frm = tcg_temp_new_i64();
  2660. dest = tcg_temp_new_i64();
  2661. tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
  2662. tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
  2663. if (vmin) {
  2664. gen_helper_vfp_minnumd(dest, frn, frm, fpst);
  2665. } else {
  2666. gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
  2667. }
  2668. tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
  2669. tcg_temp_free_i64(frn);
  2670. tcg_temp_free_i64(frm);
  2671. tcg_temp_free_i64(dest);
  2672. } else {
  2673. TCGv_i32 frn, frm, dest;
  2674. frn = tcg_temp_new_i32();
  2675. frm = tcg_temp_new_i32();
  2676. dest = tcg_temp_new_i32();
  2677. tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
  2678. tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
  2679. if (vmin) {
  2680. gen_helper_vfp_minnums(dest, frn, frm, fpst);
  2681. } else {
  2682. gen_helper_vfp_maxnums(dest, frn, frm, fpst);
  2683. }
  2684. tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
  2685. tcg_temp_free_i32(frn);
  2686. tcg_temp_free_i32(frm);
  2687. tcg_temp_free_i32(dest);
  2688. }
  2689. tcg_temp_free_ptr(fpst);
  2690. return 0;
  2691. }
  2692. static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
  2693. int rounding)
  2694. {
  2695. TCGv_ptr fpst = get_fpstatus_ptr(0);
  2696. TCGv_i32 tcg_rmode;
  2697. tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
  2698. gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
  2699. if (dp) {
  2700. TCGv_i64 tcg_op;
  2701. TCGv_i64 tcg_res;
  2702. tcg_op = tcg_temp_new_i64();
  2703. tcg_res = tcg_temp_new_i64();
  2704. tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
  2705. gen_helper_rintd(tcg_res, tcg_op, fpst);
  2706. tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
  2707. tcg_temp_free_i64(tcg_op);
  2708. tcg_temp_free_i64(tcg_res);
  2709. } else {
  2710. TCGv_i32 tcg_op;
  2711. TCGv_i32 tcg_res;
  2712. tcg_op = tcg_temp_new_i32();
  2713. tcg_res = tcg_temp_new_i32();
  2714. tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
  2715. gen_helper_rints(tcg_res, tcg_op, fpst);
  2716. tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
  2717. tcg_temp_free_i32(tcg_op);
  2718. tcg_temp_free_i32(tcg_res);
  2719. }
  2720. gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
  2721. tcg_temp_free_i32(tcg_rmode);
  2722. tcg_temp_free_ptr(fpst);
  2723. return 0;
  2724. }
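/* gen_helper_set_rmode() installs the requested rounding mode in the FP
 * status word and returns the previous mode in the same temp, so calling
 * it a second time with that value restores the original rounding mode.
 * handle_vcvt() below relies on the same save/restore idiom. */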
  2725. static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
  2726. int rounding)
  2727. {
  2728. bool is_signed = extract32(insn, 7, 1);
  2729. TCGv_ptr fpst = get_fpstatus_ptr(0);
  2730. TCGv_i32 tcg_rmode, tcg_shift;
  2731. tcg_shift = tcg_const_i32(0);
  2732. tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
  2733. gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
  2734. if (dp) {
  2735. TCGv_i64 tcg_double, tcg_res;
  2736. TCGv_i32 tcg_tmp;
  2737. /* Rd is encoded as a single precision register even when the source
  2738. * is double precision.
  2739. */
  2740. rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
  2741. tcg_double = tcg_temp_new_i64();
  2742. tcg_res = tcg_temp_new_i64();
  2743. tcg_tmp = tcg_temp_new_i32();
  2744. tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
  2745. if (is_signed) {
  2746. gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
  2747. } else {
  2748. gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
  2749. }
  2750. tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
  2751. tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
  2752. tcg_temp_free_i32(tcg_tmp);
  2753. tcg_temp_free_i64(tcg_res);
  2754. tcg_temp_free_i64(tcg_double);
  2755. } else {
  2756. TCGv_i32 tcg_single, tcg_res;
  2757. tcg_single = tcg_temp_new_i32();
  2758. tcg_res = tcg_temp_new_i32();
  2759. tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
  2760. if (is_signed) {
  2761. gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
  2762. } else {
  2763. gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
  2764. }
  2765. tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
  2766. tcg_temp_free_i32(tcg_res);
  2767. tcg_temp_free_i32(tcg_single);
  2768. }
  2769. gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
  2770. tcg_temp_free_i32(tcg_rmode);
  2771. tcg_temp_free_i32(tcg_shift);
  2772. tcg_temp_free_ptr(fpst);
  2773. return 0;
  2774. }
  2775. /* Table for converting the most common AArch32 encoding of
  2776. * rounding mode to arm_fprounding order (which matches the
  2777. * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
  2778. */
  2779. static const uint8_t fp_decode_rm[] = {
  2780. FPROUNDING_TIEAWAY,
  2781. FPROUNDING_TIEEVEN,
  2782. FPROUNDING_POSINF,
  2783. FPROUNDING_NEGINF,
  2784. };
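/* Example: insn bits [17:16] == 0 selects FPROUNDING_TIEAWAY (the 'A'
 * variants such as VRINTA/VCVTA), 1 -> TIEEVEN (N), 2 -> POSINF (P),
 * 3 -> NEGINF (M), matching the VRINT{A,N,P,M}/VCVT{A,N,P,M} mnemonics
 * decoded in disas_vfp_v8_insn() below. */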
  2785. static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
  2786. {
  2787. uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
  2788. if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
  2789. return 1;
  2790. }
  2791. if (dp) {
  2792. VFP_DREG_D(rd, insn);
  2793. VFP_DREG_N(rn, insn);
  2794. VFP_DREG_M(rm, insn);
  2795. } else {
  2796. rd = VFP_SREG_D(insn);
  2797. rn = VFP_SREG_N(insn);
  2798. rm = VFP_SREG_M(insn);
  2799. }
  2800. if ((insn & 0x0f800e50) == 0x0e000a00) {
  2801. return handle_vsel(insn, rd, rn, rm, dp);
  2802. } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
  2803. return handle_vminmaxnm(insn, rd, rn, rm, dp);
  2804. } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
  2805. /* VRINTA, VRINTN, VRINTP, VRINTM */
  2806. int rounding = fp_decode_rm[extract32(insn, 16, 2)];
  2807. return handle_vrint(insn, rd, rm, dp, rounding);
  2808. } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
  2809. /* VCVTA, VCVTN, VCVTP, VCVTM */
  2810. int rounding = fp_decode_rm[extract32(insn, 16, 2)];
  2811. return handle_vcvt(insn, rd, rm, dp, rounding);
  2812. }
  2813. return 1;
  2814. }
  2815. /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2816. (i.e. an undefined instruction). */
  2817. static int disas_vfp_insn(DisasContext *s, uint32_t insn)
  2818. {
  2819. uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
  2820. int dp, veclen;
  2821. TCGv_i32 addr;
  2822. TCGv_i32 tmp;
  2823. TCGv_i32 tmp2;
  2824. if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
  2825. return 1;
  2826. }
  2827. /* FIXME: this access check should not take precedence over UNDEF
  2828. * for invalid encodings; we will generate incorrect syndrome information
  2829. * for attempts to execute invalid vfp/neon encodings with FP disabled.
  2830. */
  2831. if (s->fp_excp_el) {
  2832. gen_exception_insn(s, 4, EXCP_UDEF,
  2833. syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
  2834. return 0;
  2835. }
  2836. if (!s->vfp_enabled) {
  2837. /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
  2838. if ((insn & 0x0fe00fff) != 0x0ee00a10)
  2839. return 1;
  2840. rn = (insn >> 16) & 0xf;
  2841. if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
  2842. && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
  2843. return 1;
  2844. }
  2845. }
  2846. if (extract32(insn, 28, 4) == 0xf) {
  2847. /* Encodings with T=1 (Thumb) or unconditional (ARM):
  2848. * only used in v8 and above.
  2849. */
  2850. return disas_vfp_v8_insn(s, insn);
  2851. }
  2852. dp = ((insn & 0xf00) == 0xb00);
  2853. switch ((insn >> 24) & 0xf) {
  2854. case 0xe:
  2855. if (insn & (1 << 4)) {
  2856. /* single register transfer */
  2857. rd = (insn >> 12) & 0xf;
  2858. if (dp) {
  2859. int size;
  2860. int pass;
  2861. VFP_DREG_N(rn, insn);
  2862. if (insn & 0xf)
  2863. return 1;
  2864. if (insn & 0x00c00060
  2865. && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
  2866. return 1;
  2867. }
  2868. pass = (insn >> 21) & 1;
  2869. if (insn & (1 << 22)) {
  2870. size = 0;
  2871. offset = ((insn >> 5) & 3) * 8;
  2872. } else if (insn & (1 << 5)) {
  2873. size = 1;
  2874. offset = (insn & (1 << 6)) ? 16 : 0;
  2875. } else {
  2876. size = 2;
  2877. offset = 0;
  2878. }
  2879. if (insn & ARM_CP_RW_BIT) {
  2880. /* vfp->arm */
  2881. tmp = neon_load_reg(rn, pass);
  2882. switch (size) {
  2883. case 0:
  2884. if (offset)
  2885. tcg_gen_shri_i32(tmp, tmp, offset);
  2886. if (insn & (1 << 23))
  2887. gen_uxtb(tmp);
  2888. else
  2889. gen_sxtb(tmp);
  2890. break;
  2891. case 1:
  2892. if (insn & (1 << 23)) {
  2893. if (offset) {
  2894. tcg_gen_shri_i32(tmp, tmp, 16);
  2895. } else {
  2896. gen_uxth(tmp);
  2897. }
  2898. } else {
  2899. if (offset) {
  2900. tcg_gen_sari_i32(tmp, tmp, 16);
  2901. } else {
  2902. gen_sxth(tmp);
  2903. }
  2904. }
  2905. break;
  2906. case 2:
  2907. break;
  2908. }
  2909. store_reg(s, rd, tmp);
  2910. } else {
  2911. /* arm->vfp */
  2912. tmp = load_reg(s, rd);
  2913. if (insn & (1 << 23)) {
  2914. /* VDUP */
  2915. if (size == 0) {
  2916. gen_neon_dup_u8(tmp, 0);
  2917. } else if (size == 1) {
  2918. gen_neon_dup_low16(tmp);
  2919. }
  2920. for (n = 0; n <= pass * 2; n++) {
  2921. tmp2 = tcg_temp_new_i32();
  2922. tcg_gen_mov_i32(tmp2, tmp);
  2923. neon_store_reg(rn, n, tmp2);
  2924. }
  2925. neon_store_reg(rn, n, tmp);
  2926. } else {
  2927. /* VMOV */
  2928. switch (size) {
  2929. case 0:
  2930. tmp2 = neon_load_reg(rn, pass);
  2931. tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
  2932. tcg_temp_free_i32(tmp2);
  2933. break;
  2934. case 1:
  2935. tmp2 = neon_load_reg(rn, pass);
  2936. tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
  2937. tcg_temp_free_i32(tmp2);
  2938. break;
  2939. case 2:
  2940. break;
  2941. }
  2942. neon_store_reg(rn, pass, tmp);
  2943. }
  2944. }
  2945. } else { /* !dp */
  2946. if ((insn & 0x6f) != 0x00)
  2947. return 1;
  2948. rn = VFP_SREG_N(insn);
  2949. if (insn & ARM_CP_RW_BIT) {
  2950. /* vfp->arm */
  2951. if (insn & (1 << 21)) {
  2952. /* system register */
  2953. rn >>= 1;
  2954. switch (rn) {
  2955. case ARM_VFP_FPSID:
2956. /* VFP2 allows access to FPSID from userspace.
  2957. VFP3 restricts all id registers to privileged
  2958. accesses. */
  2959. if (IS_USER(s)
  2960. && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  2961. return 1;
  2962. }
  2963. tmp = load_cpu_field(vfp.xregs[rn]);
  2964. break;
  2965. case ARM_VFP_FPEXC:
  2966. if (IS_USER(s))
  2967. return 1;
  2968. tmp = load_cpu_field(vfp.xregs[rn]);
  2969. break;
  2970. case ARM_VFP_FPINST:
  2971. case ARM_VFP_FPINST2:
  2972. /* Not present in VFP3. */
  2973. if (IS_USER(s)
  2974. || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  2975. return 1;
  2976. }
  2977. tmp = load_cpu_field(vfp.xregs[rn]);
  2978. break;
  2979. case ARM_VFP_FPSCR:
  2980. if (rd == 15) {
  2981. tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
  2982. tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
  2983. } else {
  2984. tmp = tcg_temp_new_i32();
  2985. gen_helper_vfp_get_fpscr(tmp, cpu_env);
  2986. }
  2987. break;
  2988. case ARM_VFP_MVFR2:
  2989. if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
  2990. return 1;
  2991. }
  2992. /* fall through */
  2993. case ARM_VFP_MVFR0:
  2994. case ARM_VFP_MVFR1:
  2995. if (IS_USER(s)
  2996. || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
  2997. return 1;
  2998. }
  2999. tmp = load_cpu_field(vfp.xregs[rn]);
  3000. break;
  3001. default:
  3002. return 1;
  3003. }
  3004. } else {
  3005. gen_mov_F0_vreg(0, rn);
  3006. tmp = gen_vfp_mrs();
  3007. }
  3008. if (rd == 15) {
  3009. /* Set the 4 flag bits in the CPSR. */
  3010. gen_set_nzcv(tmp);
  3011. tcg_temp_free_i32(tmp);
  3012. } else {
  3013. store_reg(s, rd, tmp);
  3014. }
  3015. } else {
  3016. /* arm->vfp */
  3017. if (insn & (1 << 21)) {
  3018. rn >>= 1;
  3019. /* system register */
  3020. switch (rn) {
  3021. case ARM_VFP_FPSID:
  3022. case ARM_VFP_MVFR0:
  3023. case ARM_VFP_MVFR1:
  3024. /* Writes are ignored. */
  3025. break;
  3026. case ARM_VFP_FPSCR:
  3027. tmp = load_reg(s, rd);
  3028. gen_helper_vfp_set_fpscr(cpu_env, tmp);
  3029. tcg_temp_free_i32(tmp);
  3030. gen_lookup_tb(s);
  3031. break;
  3032. case ARM_VFP_FPEXC:
  3033. if (IS_USER(s))
  3034. return 1;
  3035. /* TODO: VFP subarchitecture support.
  3036. * For now, keep the EN bit only */
  3037. tmp = load_reg(s, rd);
  3038. tcg_gen_andi_i32(tmp, tmp, 1 << 30);
  3039. store_cpu_field(tmp, vfp.xregs[rn]);
  3040. gen_lookup_tb(s);
  3041. break;
  3042. case ARM_VFP_FPINST:
  3043. case ARM_VFP_FPINST2:
  3044. if (IS_USER(s)) {
  3045. return 1;
  3046. }
  3047. tmp = load_reg(s, rd);
  3048. store_cpu_field(tmp, vfp.xregs[rn]);
  3049. break;
  3050. default:
  3051. return 1;
  3052. }
  3053. } else {
  3054. tmp = load_reg(s, rd);
  3055. gen_vfp_msr(tmp);
  3056. gen_mov_vreg_F0(0, rn);
  3057. }
  3058. }
  3059. }
  3060. } else {
  3061. /* data processing */
  3062. /* The opcode is in bits 23, 21, 20 and 6. */
  3063. op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
  3064. if (dp) {
  3065. if (op == 15) {
  3066. /* rn is opcode */
  3067. rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
  3068. } else {
  3069. /* rn is register number */
  3070. VFP_DREG_N(rn, insn);
  3071. }
  3072. if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
  3073. ((rn & 0x1e) == 0x6))) {
  3074. /* Integer or single/half precision destination. */
  3075. rd = VFP_SREG_D(insn);
  3076. } else {
  3077. VFP_DREG_D(rd, insn);
  3078. }
  3079. if (op == 15 &&
  3080. (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
  3081. ((rn & 0x1e) == 0x4))) {
  3082. /* VCVT from int or half precision is always from S reg
  3083. * regardless of dp bit. VCVT with immediate frac_bits
3084. * has the same format as SREG_M.
  3085. */
  3086. rm = VFP_SREG_M(insn);
  3087. } else {
  3088. VFP_DREG_M(rm, insn);
  3089. }
  3090. } else {
  3091. rn = VFP_SREG_N(insn);
  3092. if (op == 15 && rn == 15) {
  3093. /* Double precision destination. */
  3094. VFP_DREG_D(rd, insn);
  3095. } else {
  3096. rd = VFP_SREG_D(insn);
  3097. }
  3098. /* NB that we implicitly rely on the encoding for the frac_bits
  3099. * in VCVT of fixed to float being the same as that of an SREG_M
  3100. */
  3101. rm = VFP_SREG_M(insn);
  3102. }
  3103. veclen = s->vec_len;
  3104. if (op == 15 && rn > 3)
  3105. veclen = 0;
  3106. /* Shut up compiler warnings. */
  3107. delta_m = 0;
  3108. delta_d = 0;
  3109. bank_mask = 0;
  3110. if (veclen > 0) {
  3111. if (dp)
  3112. bank_mask = 0xc;
  3113. else
  3114. bank_mask = 0x18;
  3115. /* Figure out what type of vector operation this is. */
  3116. if ((rd & bank_mask) == 0) {
  3117. /* scalar */
  3118. veclen = 0;
  3119. } else {
  3120. if (dp)
  3121. delta_d = (s->vec_stride >> 1) + 1;
  3122. else
  3123. delta_d = s->vec_stride + 1;
  3124. if ((rm & bank_mask) == 0) {
  3125. /* mixed scalar/vector */
  3126. delta_m = 0;
  3127. } else {
  3128. /* vector */
  3129. delta_m = delta_d;
  3130. }
  3131. }
  3132. }
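/* Short-vector VFP: s->vec_len/vec_stride are derived from FPSCR.LEN and
 * FPSCR.STRIDE. A destination in bank 0 makes the operation scalar
 * (veclen forced to 0); otherwise it is repeated a further veclen times,
 * stepping rd (and rn/rm when they are vectors; an rm in bank 0 acts as a
 * scalar) within the 4- or 8-register bank selected by bank_mask. */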
  3133. /* Load the initial operands. */
  3134. if (op == 15) {
  3135. switch (rn) {
  3136. case 16:
  3137. case 17:
  3138. /* Integer source */
  3139. gen_mov_F0_vreg(0, rm);
  3140. break;
  3141. case 8:
  3142. case 9:
  3143. /* Compare */
  3144. gen_mov_F0_vreg(dp, rd);
  3145. gen_mov_F1_vreg(dp, rm);
  3146. break;
  3147. case 10:
  3148. case 11:
  3149. /* Compare with zero */
  3150. gen_mov_F0_vreg(dp, rd);
  3151. gen_vfp_F1_ld0(dp);
  3152. break;
  3153. case 20:
  3154. case 21:
  3155. case 22:
  3156. case 23:
  3157. case 28:
  3158. case 29:
  3159. case 30:
  3160. case 31:
  3161. /* Source and destination the same. */
  3162. gen_mov_F0_vreg(dp, rd);
  3163. break;
  3164. case 4:
  3165. case 5:
  3166. case 6:
  3167. case 7:
  3168. /* VCVTB, VCVTT: only present with the halfprec extension
  3169. * UNPREDICTABLE if bit 8 is set prior to ARMv8
  3170. * (we choose to UNDEF)
  3171. */
  3172. if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
  3173. !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
  3174. return 1;
  3175. }
  3176. if (!extract32(rn, 1, 1)) {
  3177. /* Half precision source. */
  3178. gen_mov_F0_vreg(0, rm);
  3179. break;
  3180. }
  3181. /* Otherwise fall through */
  3182. default:
  3183. /* One source operand. */
  3184. gen_mov_F0_vreg(dp, rm);
  3185. break;
  3186. }
  3187. } else {
  3188. /* Two source operands. */
  3189. gen_mov_F0_vreg(dp, rn);
  3190. gen_mov_F1_vreg(dp, rm);
  3191. }
  3192. for (;;) {
  3193. /* Perform the calculation. */
  3194. switch (op) {
  3195. case 0: /* VMLA: fd + (fn * fm) */
  3196. /* Note that order of inputs to the add matters for NaNs */
  3197. gen_vfp_F1_mul(dp);
  3198. gen_mov_F0_vreg(dp, rd);
  3199. gen_vfp_add(dp);
  3200. break;
  3201. case 1: /* VMLS: fd + -(fn * fm) */
  3202. gen_vfp_mul(dp);
  3203. gen_vfp_F1_neg(dp);
  3204. gen_mov_F0_vreg(dp, rd);
  3205. gen_vfp_add(dp);
  3206. break;
  3207. case 2: /* VNMLS: -fd + (fn * fm) */
  3208. /* Note that it isn't valid to replace (-A + B) with (B - A)
  3209. * or similar plausible looking simplifications
  3210. * because this will give wrong results for NaNs.
  3211. */
  3212. gen_vfp_F1_mul(dp);
  3213. gen_mov_F0_vreg(dp, rd);
  3214. gen_vfp_neg(dp);
  3215. gen_vfp_add(dp);
  3216. break;
  3217. case 3: /* VNMLA: -fd + -(fn * fm) */
  3218. gen_vfp_mul(dp);
  3219. gen_vfp_F1_neg(dp);
  3220. gen_mov_F0_vreg(dp, rd);
  3221. gen_vfp_neg(dp);
  3222. gen_vfp_add(dp);
  3223. break;
  3224. case 4: /* mul: fn * fm */
  3225. gen_vfp_mul(dp);
  3226. break;
  3227. case 5: /* nmul: -(fn * fm) */
  3228. gen_vfp_mul(dp);
  3229. gen_vfp_neg(dp);
  3230. break;
  3231. case 6: /* add: fn + fm */
  3232. gen_vfp_add(dp);
  3233. break;
  3234. case 7: /* sub: fn - fm */
  3235. gen_vfp_sub(dp);
  3236. break;
  3237. case 8: /* div: fn / fm */
  3238. gen_vfp_div(dp);
  3239. break;
  3240. case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
  3241. case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
  3242. case 12: /* VFMA : fd = muladd( fd, fn, fm) */
  3243. case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
  3244. /* These are fused multiply-add, and must be done as one
  3245. * floating point operation with no rounding between the
  3246. * multiplication and addition steps.
  3247. * NB that doing the negations here as separate steps is
3248. * correct: an input NaN should come out with its sign bit
3249. * flipped if it is a negated input.
  3250. */
  3251. if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
  3252. return 1;
  3253. }
  3254. if (dp) {
  3255. TCGv_ptr fpst;
  3256. TCGv_i64 frd;
  3257. if (op & 1) {
  3258. /* VFNMS, VFMS */
  3259. gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
  3260. }
  3261. frd = tcg_temp_new_i64();
  3262. tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
  3263. if (op & 2) {
  3264. /* VFNMA, VFNMS */
  3265. gen_helper_vfp_negd(frd, frd);
  3266. }
  3267. fpst = get_fpstatus_ptr(0);
  3268. gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
  3269. cpu_F1d, frd, fpst);
  3270. tcg_temp_free_ptr(fpst);
  3271. tcg_temp_free_i64(frd);
  3272. } else {
  3273. TCGv_ptr fpst;
  3274. TCGv_i32 frd;
  3275. if (op & 1) {
  3276. /* VFNMS, VFMS */
  3277. gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
  3278. }
  3279. frd = tcg_temp_new_i32();
  3280. tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
  3281. if (op & 2) {
  3282. gen_helper_vfp_negs(frd, frd);
  3283. }
  3284. fpst = get_fpstatus_ptr(0);
  3285. gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
  3286. cpu_F1s, frd, fpst);
  3287. tcg_temp_free_ptr(fpst);
  3288. tcg_temp_free_i32(frd);
  3289. }
  3290. break;
  3291. case 14: /* fconst */
  3292. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3293. return 1;
  3294. }
  3295. n = (insn << 12) & 0x80000000;
  3296. i = ((insn >> 12) & 0x70) | (insn & 0xf);
  3297. if (dp) {
  3298. if (i & 0x40)
  3299. i |= 0x3f80;
  3300. else
  3301. i |= 0x4000;
  3302. n |= i << 16;
  3303. tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
  3304. } else {
  3305. if (i & 0x40)
  3306. i |= 0x780;
  3307. else
  3308. i |= 0x800;
  3309. n |= i << 19;
  3310. tcg_gen_movi_i32(cpu_F0s, n);
  3311. }
  3312. break;
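/* This open-codes VFPExpandImm(): the 8-bit immediate abcdefgh (split
 * between insn[19:16] and insn[3:0]) becomes a constant with sign a, then
 * NOT(b) followed by replicated copies of b, then cdefgh, then zeros;
 * e.g. imm8 = 0x70 encodes 1.0 and imm8 = 0x00 encodes 2.0. */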
  3313. case 15: /* extension space */
  3314. switch (rn) {
  3315. case 0: /* cpy */
  3316. /* no-op */
  3317. break;
  3318. case 1: /* abs */
  3319. gen_vfp_abs(dp);
  3320. break;
  3321. case 2: /* neg */
  3322. gen_vfp_neg(dp);
  3323. break;
  3324. case 3: /* sqrt */
  3325. gen_vfp_sqrt(dp);
  3326. break;
  3327. case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
  3328. tmp = gen_vfp_mrs();
  3329. tcg_gen_ext16u_i32(tmp, tmp);
  3330. if (dp) {
  3331. gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
  3332. cpu_env);
  3333. } else {
  3334. gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
  3335. cpu_env);
  3336. }
  3337. tcg_temp_free_i32(tmp);
  3338. break;
  3339. case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
  3340. tmp = gen_vfp_mrs();
  3341. tcg_gen_shri_i32(tmp, tmp, 16);
  3342. if (dp) {
  3343. gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
  3344. cpu_env);
  3345. } else {
  3346. gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
  3347. cpu_env);
  3348. }
  3349. tcg_temp_free_i32(tmp);
  3350. break;
  3351. case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
  3352. tmp = tcg_temp_new_i32();
  3353. if (dp) {
  3354. gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
  3355. cpu_env);
  3356. } else {
  3357. gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
  3358. cpu_env);
  3359. }
  3360. gen_mov_F0_vreg(0, rd);
  3361. tmp2 = gen_vfp_mrs();
  3362. tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
  3363. tcg_gen_or_i32(tmp, tmp, tmp2);
  3364. tcg_temp_free_i32(tmp2);
  3365. gen_vfp_msr(tmp);
  3366. break;
  3367. case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
  3368. tmp = tcg_temp_new_i32();
  3369. if (dp) {
  3370. gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
  3371. cpu_env);
  3372. } else {
  3373. gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
  3374. cpu_env);
  3375. }
  3376. tcg_gen_shli_i32(tmp, tmp, 16);
  3377. gen_mov_F0_vreg(0, rd);
  3378. tmp2 = gen_vfp_mrs();
  3379. tcg_gen_ext16u_i32(tmp2, tmp2);
  3380. tcg_gen_or_i32(tmp, tmp, tmp2);
  3381. tcg_temp_free_i32(tmp2);
  3382. gen_vfp_msr(tmp);
  3383. break;
  3384. case 8: /* cmp */
  3385. gen_vfp_cmp(dp);
  3386. break;
  3387. case 9: /* cmpe */
  3388. gen_vfp_cmpe(dp);
  3389. break;
  3390. case 10: /* cmpz */
  3391. gen_vfp_cmp(dp);
  3392. break;
  3393. case 11: /* cmpez */
  3394. gen_vfp_F1_ld0(dp);
  3395. gen_vfp_cmpe(dp);
  3396. break;
  3397. case 12: /* vrintr */
  3398. {
  3399. TCGv_ptr fpst = get_fpstatus_ptr(0);
  3400. if (dp) {
  3401. gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
  3402. } else {
  3403. gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
  3404. }
  3405. tcg_temp_free_ptr(fpst);
  3406. break;
  3407. }
  3408. case 13: /* vrintz */
  3409. {
  3410. TCGv_ptr fpst = get_fpstatus_ptr(0);
  3411. TCGv_i32 tcg_rmode;
  3412. tcg_rmode = tcg_const_i32(float_round_to_zero);
  3413. gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
  3414. if (dp) {
  3415. gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
  3416. } else {
  3417. gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
  3418. }
  3419. gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
  3420. tcg_temp_free_i32(tcg_rmode);
  3421. tcg_temp_free_ptr(fpst);
  3422. break;
  3423. }
  3424. case 14: /* vrintx */
  3425. {
  3426. TCGv_ptr fpst = get_fpstatus_ptr(0);
  3427. if (dp) {
  3428. gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
  3429. } else {
  3430. gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
  3431. }
  3432. tcg_temp_free_ptr(fpst);
  3433. break;
  3434. }
  3435. case 15: /* single<->double conversion */
  3436. if (dp)
  3437. gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
  3438. else
  3439. gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
  3440. break;
  3441. case 16: /* fuito */
  3442. gen_vfp_uito(dp, 0);
  3443. break;
  3444. case 17: /* fsito */
  3445. gen_vfp_sito(dp, 0);
  3446. break;
  3447. case 20: /* fshto */
  3448. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3449. return 1;
  3450. }
  3451. gen_vfp_shto(dp, 16 - rm, 0);
  3452. break;
  3453. case 21: /* fslto */
  3454. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3455. return 1;
  3456. }
  3457. gen_vfp_slto(dp, 32 - rm, 0);
  3458. break;
  3459. case 22: /* fuhto */
  3460. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3461. return 1;
  3462. }
  3463. gen_vfp_uhto(dp, 16 - rm, 0);
  3464. break;
  3465. case 23: /* fulto */
  3466. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3467. return 1;
  3468. }
  3469. gen_vfp_ulto(dp, 32 - rm, 0);
  3470. break;
  3471. case 24: /* ftoui */
  3472. gen_vfp_toui(dp, 0);
  3473. break;
  3474. case 25: /* ftouiz */
  3475. gen_vfp_touiz(dp, 0);
  3476. break;
  3477. case 26: /* ftosi */
  3478. gen_vfp_tosi(dp, 0);
  3479. break;
  3480. case 27: /* ftosiz */
  3481. gen_vfp_tosiz(dp, 0);
  3482. break;
  3483. case 28: /* ftosh */
  3484. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3485. return 1;
  3486. }
  3487. gen_vfp_tosh(dp, 16 - rm, 0);
  3488. break;
  3489. case 29: /* ftosl */
  3490. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3491. return 1;
  3492. }
  3493. gen_vfp_tosl(dp, 32 - rm, 0);
  3494. break;
  3495. case 30: /* ftouh */
  3496. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3497. return 1;
  3498. }
  3499. gen_vfp_touh(dp, 16 - rm, 0);
  3500. break;
  3501. case 31: /* ftoul */
  3502. if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
  3503. return 1;
  3504. }
  3505. gen_vfp_toul(dp, 32 - rm, 0);
  3506. break;
  3507. default: /* undefined */
  3508. return 1;
  3509. }
  3510. break;
  3511. default: /* undefined */
  3512. return 1;
  3513. }
  3514. /* Write back the result. */
  3515. if (op == 15 && (rn >= 8 && rn <= 11)) {
  3516. /* Comparison, do nothing. */
  3517. } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
  3518. (rn & 0x1e) == 0x6)) {
  3519. /* VCVT double to int: always integer result.
  3520. * VCVT double to half precision is always a single
  3521. * precision result.
  3522. */
  3523. gen_mov_vreg_F0(0, rd);
  3524. } else if (op == 15 && rn == 15) {
  3525. /* conversion */
  3526. gen_mov_vreg_F0(!dp, rd);
  3527. } else {
  3528. gen_mov_vreg_F0(dp, rd);
  3529. }
  3530. /* break out of the loop if we have finished */
  3531. if (veclen == 0)
  3532. break;
  3533. if (op == 15 && delta_m == 0) {
  3534. /* single source one-many */
  3535. while (veclen--) {
  3536. rd = ((rd + delta_d) & (bank_mask - 1))
  3537. | (rd & bank_mask);
  3538. gen_mov_vreg_F0(dp, rd);
  3539. }
  3540. break;
  3541. }
  3542. /* Setup the next operands. */
  3543. veclen--;
  3544. rd = ((rd + delta_d) & (bank_mask - 1))
  3545. | (rd & bank_mask);
  3546. if (op == 15) {
  3547. /* One source operand. */
  3548. rm = ((rm + delta_m) & (bank_mask - 1))
  3549. | (rm & bank_mask);
  3550. gen_mov_F0_vreg(dp, rm);
  3551. } else {
  3552. /* Two source operands. */
  3553. rn = ((rn + delta_d) & (bank_mask - 1))
  3554. | (rn & bank_mask);
  3555. gen_mov_F0_vreg(dp, rn);
  3556. if (delta_m) {
  3557. rm = ((rm + delta_m) & (bank_mask - 1))
  3558. | (rm & bank_mask);
  3559. gen_mov_F1_vreg(dp, rm);
  3560. }
  3561. }
  3562. }
  3563. }
  3564. break;
  3565. case 0xc:
  3566. case 0xd:
  3567. if ((insn & 0x03e00000) == 0x00400000) {
  3568. /* two-register transfer */
  3569. rn = (insn >> 16) & 0xf;
  3570. rd = (insn >> 12) & 0xf;
  3571. if (dp) {
  3572. VFP_DREG_M(rm, insn);
  3573. } else {
  3574. rm = VFP_SREG_M(insn);
  3575. }
  3576. if (insn & ARM_CP_RW_BIT) {
  3577. /* vfp->arm */
  3578. if (dp) {
  3579. gen_mov_F0_vreg(0, rm * 2);
  3580. tmp = gen_vfp_mrs();
  3581. store_reg(s, rd, tmp);
  3582. gen_mov_F0_vreg(0, rm * 2 + 1);
  3583. tmp = gen_vfp_mrs();
  3584. store_reg(s, rn, tmp);
  3585. } else {
  3586. gen_mov_F0_vreg(0, rm);
  3587. tmp = gen_vfp_mrs();
  3588. store_reg(s, rd, tmp);
  3589. gen_mov_F0_vreg(0, rm + 1);
  3590. tmp = gen_vfp_mrs();
  3591. store_reg(s, rn, tmp);
  3592. }
  3593. } else {
  3594. /* arm->vfp */
  3595. if (dp) {
  3596. tmp = load_reg(s, rd);
  3597. gen_vfp_msr(tmp);
  3598. gen_mov_vreg_F0(0, rm * 2);
  3599. tmp = load_reg(s, rn);
  3600. gen_vfp_msr(tmp);
  3601. gen_mov_vreg_F0(0, rm * 2 + 1);
  3602. } else {
  3603. tmp = load_reg(s, rd);
  3604. gen_vfp_msr(tmp);
  3605. gen_mov_vreg_F0(0, rm);
  3606. tmp = load_reg(s, rn);
  3607. gen_vfp_msr(tmp);
  3608. gen_mov_vreg_F0(0, rm + 1);
  3609. }
  3610. }
  3611. } else {
  3612. /* Load/store */
  3613. rn = (insn >> 16) & 0xf;
  3614. if (dp)
  3615. VFP_DREG_D(rd, insn);
  3616. else
  3617. rd = VFP_SREG_D(insn);
  3618. if ((insn & 0x01200000) == 0x01000000) {
  3619. /* Single load/store */
  3620. offset = (insn & 0xff) << 2;
  3621. if ((insn & (1 << 23)) == 0)
  3622. offset = -offset;
  3623. if (s->thumb && rn == 15) {
  3624. /* This is actually UNPREDICTABLE */
  3625. addr = tcg_temp_new_i32();
  3626. tcg_gen_movi_i32(addr, s->pc & ~2);
  3627. } else {
  3628. addr = load_reg(s, rn);
  3629. }
  3630. tcg_gen_addi_i32(addr, addr, offset);
  3631. if (insn & (1 << 20)) {
  3632. gen_vfp_ld(s, dp, addr);
  3633. gen_mov_vreg_F0(dp, rd);
  3634. } else {
  3635. gen_mov_F0_vreg(dp, rd);
  3636. gen_vfp_st(s, dp, addr);
  3637. }
  3638. tcg_temp_free_i32(addr);
  3639. } else {
  3640. /* load/store multiple */
  3641. int w = insn & (1 << 21);
  3642. if (dp)
  3643. n = (insn >> 1) & 0x7f;
  3644. else
  3645. n = insn & 0xff;
  3646. if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
  3647. /* P == U , W == 1 => UNDEF */
  3648. return 1;
  3649. }
  3650. if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
  3651. /* UNPREDICTABLE cases for bad immediates: we choose to
  3652. * UNDEF to avoid generating huge numbers of TCG ops
  3653. */
  3654. return 1;
  3655. }
  3656. if (rn == 15 && w) {
  3657. /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
  3658. return 1;
  3659. }
  3660. if (s->thumb && rn == 15) {
  3661. /* This is actually UNPREDICTABLE */
  3662. addr = tcg_temp_new_i32();
  3663. tcg_gen_movi_i32(addr, s->pc & ~2);
  3664. } else {
  3665. addr = load_reg(s, rn);
  3666. }
  3667. if (insn & (1 << 24)) /* pre-decrement */
  3668. tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
  3669. if (dp)
  3670. offset = 8;
  3671. else
  3672. offset = 4;
  3673. for (i = 0; i < n; i++) {
  3674. if (insn & ARM_CP_RW_BIT) {
  3675. /* load */
  3676. gen_vfp_ld(s, dp, addr);
  3677. gen_mov_vreg_F0(dp, rd + i);
  3678. } else {
  3679. /* store */
  3680. gen_mov_F0_vreg(dp, rd + i);
  3681. gen_vfp_st(s, dp, addr);
  3682. }
  3683. tcg_gen_addi_i32(addr, addr, offset);
  3684. }
  3685. if (w) {
  3686. /* writeback */
  3687. if (insn & (1 << 24))
  3688. offset = -offset * n;
  3689. else if (dp && (insn & 1))
  3690. offset = 4;
  3691. else
  3692. offset = 0;
  3693. if (offset != 0)
  3694. tcg_gen_addi_i32(addr, addr, offset);
  3695. store_reg(s, rn, addr);
  3696. } else {
  3697. tcg_temp_free_i32(addr);
  3698. }
  3699. }
  3700. }
  3701. break;
  3702. default:
  3703. /* Should never happen. */
  3704. return 1;
  3705. }
  3706. return 0;
  3707. }
  3708. static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
  3709. {
  3710. TranslationBlock *tb;
  3711. tb = s->tb;
  3712. if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
  3713. tcg_gen_goto_tb(n);
  3714. gen_set_pc_im(s, dest);
  3715. tcg_gen_exit_tb((uintptr_t)tb + n);
  3716. } else {
  3717. gen_set_pc_im(s, dest);
  3718. tcg_gen_exit_tb(0);
  3719. }
  3720. }
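/* Direct TB chaining (goto_tb plus exit_tb with the TB pointer and slot
 * index encoded in the return value) is only used when the branch target
 * lies in the same guest page as the start of the TB being translated;
 * otherwise the PC is updated and we exit with 0 so the main loop looks
 * the destination up again. */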
  3721. static inline void gen_jmp (DisasContext *s, uint32_t dest)
  3722. {
  3723. if (unlikely(s->singlestep_enabled || s->ss_active)) {
  3724. /* An indirect jump so that we still trigger the debug exception. */
  3725. if (s->thumb)
  3726. dest |= 1;
  3727. gen_bx_im(s, dest);
  3728. } else {
  3729. gen_goto_tb(s, 0, dest);
  3730. s->is_jmp = DISAS_TB_JUMP;
  3731. }
  3732. }
  3733. static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
  3734. {
  3735. if (x)
  3736. tcg_gen_sari_i32(t0, t0, 16);
  3737. else
  3738. gen_sxth(t0);
  3739. if (y)
  3740. tcg_gen_sari_i32(t1, t1, 16);
  3741. else
  3742. gen_sxth(t1);
  3743. tcg_gen_mul_i32(t0, t0, t1);
  3744. }
  3745. /* Return the mask of PSR bits set by a MSR instruction. */
  3746. static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
  3747. {
  3748. uint32_t mask;
  3749. mask = 0;
  3750. if (flags & (1 << 0))
  3751. mask |= 0xff;
  3752. if (flags & (1 << 1))
  3753. mask |= 0xff00;
  3754. if (flags & (1 << 2))
  3755. mask |= 0xff0000;
  3756. if (flags & (1 << 3))
  3757. mask |= 0xff000000;
  3758. /* Mask out undefined bits. */
  3759. mask &= ~CPSR_RESERVED;
  3760. if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
  3761. mask &= ~CPSR_T;
  3762. }
  3763. if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
3764. mask &= ~CPSR_Q; /* V5TE in reality */
  3765. }
  3766. if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
  3767. mask &= ~(CPSR_E | CPSR_GE);
  3768. }
  3769. if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
  3770. mask &= ~CPSR_IT;
  3771. }
  3772. /* Mask out execution state and reserved bits. */
  3773. if (!spsr) {
  3774. mask &= ~(CPSR_EXEC | CPSR_RESERVED);
  3775. }
  3776. /* Mask out privileged bits. */
  3777. if (IS_USER(s))
  3778. mask &= CPSR_USER;
  3779. return mask;
  3780. }
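/* Example: flags is the 4-bit MSR field mask (c, x, s, f). flags == 0x9
 * selects the control and flags bytes, giving a raw mask of 0xff0000ff
 * before the feature, execution-state and privilege masking in this
 * function trims it further. */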
  3781. /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
  3782. static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
  3783. {
  3784. TCGv_i32 tmp;
  3785. if (spsr) {
  3786. /* ??? This is also undefined in system mode. */
  3787. if (IS_USER(s))
  3788. return 1;
  3789. tmp = load_cpu_field(spsr);
  3790. tcg_gen_andi_i32(tmp, tmp, ~mask);
  3791. tcg_gen_andi_i32(t0, t0, mask);
  3792. tcg_gen_or_i32(tmp, tmp, t0);
  3793. store_cpu_field(tmp, spsr);
  3794. } else {
  3795. gen_set_cpsr(t0, mask);
  3796. }
  3797. tcg_temp_free_i32(t0);
  3798. gen_lookup_tb(s);
  3799. return 0;
  3800. }
  3801. /* Returns nonzero if access to the PSR is not permitted. */
  3802. static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
  3803. {
  3804. TCGv_i32 tmp;
  3805. tmp = tcg_temp_new_i32();
  3806. tcg_gen_movi_i32(tmp, val);
  3807. return gen_set_psr(s, mask, spsr, tmp);
  3808. }
  3809. /* Generate an old-style exception return. Marks pc as dead. */
  3810. static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
  3811. {
  3812. TCGv_i32 tmp;
  3813. store_reg(s, 15, pc);
  3814. tmp = load_cpu_field(spsr);
  3815. gen_set_cpsr(tmp, CPSR_ERET_MASK);
  3816. tcg_temp_free_i32(tmp);
  3817. s->is_jmp = DISAS_UPDATE;
  3818. }
  3819. /* Generate a v6 exception return. Marks both values as dead. */
  3820. static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
  3821. {
  3822. gen_set_cpsr(cpsr, CPSR_ERET_MASK);
  3823. tcg_temp_free_i32(cpsr);
  3824. store_reg(s, 15, pc);
  3825. s->is_jmp = DISAS_UPDATE;
  3826. }
  3827. static void gen_nop_hint(DisasContext *s, int val)
  3828. {
  3829. switch (val) {
  3830. case 1: /* yield */
  3831. gen_set_pc_im(s, s->pc);
  3832. s->is_jmp = DISAS_YIELD;
  3833. break;
  3834. case 3: /* wfi */
  3835. gen_set_pc_im(s, s->pc);
  3836. s->is_jmp = DISAS_WFI;
  3837. break;
  3838. case 2: /* wfe */
  3839. gen_set_pc_im(s, s->pc);
  3840. s->is_jmp = DISAS_WFE;
  3841. break;
  3842. case 4: /* sev */
  3843. case 5: /* sevl */
  3844. /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
  3845. default: /* nop */
  3846. break;
  3847. }
  3848. }
  3849. #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
  3850. static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
  3851. {
  3852. switch (size) {
  3853. case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
  3854. case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
  3855. case 2: tcg_gen_add_i32(t0, t0, t1); break;
  3856. default: abort();
  3857. }
  3858. }
  3859. static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
  3860. {
  3861. switch (size) {
  3862. case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
  3863. case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
  3864. case 2: tcg_gen_sub_i32(t0, t1, t0); break;
  3865. default: return;
  3866. }
  3867. }
  3868. /* 32-bit pairwise ops end up the same as the elementwise versions. */
  3869. #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
  3870. #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
  3871. #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
  3872. #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
  3873. #define GEN_NEON_INTEGER_OP_ENV(name) do { \
  3874. switch ((size << 1) | u) { \
  3875. case 0: \
  3876. gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
  3877. break; \
  3878. case 1: \
  3879. gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
  3880. break; \
  3881. case 2: \
  3882. gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
  3883. break; \
  3884. case 3: \
  3885. gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
  3886. break; \
  3887. case 4: \
  3888. gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
  3889. break; \
  3890. case 5: \
  3891. gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
  3892. break; \
  3893. default: return 1; \
  3894. }} while (0)
  3895. #define GEN_NEON_INTEGER_OP(name) do { \
  3896. switch ((size << 1) | u) { \
  3897. case 0: \
  3898. gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
  3899. break; \
  3900. case 1: \
  3901. gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
  3902. break; \
  3903. case 2: \
  3904. gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
  3905. break; \
  3906. case 3: \
  3907. gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
  3908. break; \
  3909. case 4: \
  3910. gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
  3911. break; \
  3912. case 5: \
  3913. gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
  3914. break; \
  3915. default: return 1; \
  3916. }} while (0)
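/* In both macros above the case index is (size << 1) | u, so cases 0..5
 * map to the s8, u8, s16, u16, s32, u32 helpers in that order; e.g.
 * size == 1 with u == 1 selects case 3, the _u16 variant.  size == 3
 * (64-bit) has no entry and hits the default case, which makes the
 * translator return 1 (UNDEF).
 */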
  3917. static TCGv_i32 neon_load_scratch(int scratch)
  3918. {
  3919. TCGv_i32 tmp = tcg_temp_new_i32();
  3920. tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
  3921. return tmp;
  3922. }
  3923. static void neon_store_scratch(int scratch, TCGv_i32 var)
  3924. {
  3925. tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
  3926. tcg_temp_free_i32(var);
  3927. }
  3928. static inline TCGv_i32 neon_get_scalar(int size, int reg)
  3929. {
  3930. TCGv_i32 tmp;
  3931. if (size == 1) {
  3932. tmp = neon_load_reg(reg & 7, reg >> 4);
  3933. if (reg & 8) {
  3934. gen_neon_dup_high16(tmp);
  3935. } else {
  3936. gen_neon_dup_low16(tmp);
  3937. }
  3938. } else {
  3939. tmp = neon_load_reg(reg & 15, reg >> 4);
  3940. }
  3941. return tmp;
  3942. }
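/* Here "reg" is a packed scalar reference: the low bits select the D
 * register (3 bits for 16-bit scalars, 4 bits for 32-bit ones), bit 4
 * selects which 32-bit word of that register holds the scalar, and for
 * 16-bit scalars bit 3 picks the half-word, which is then duplicated
 * across the returned 32-bit value.
 */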
  3943. static int gen_neon_unzip(int rd, int rm, int size, int q)
  3944. {
  3945. TCGv_i32 tmp, tmp2;
  3946. if (!q && size == 2) {
  3947. return 1;
  3948. }
  3949. tmp = tcg_const_i32(rd);
  3950. tmp2 = tcg_const_i32(rm);
  3951. if (q) {
  3952. switch (size) {
  3953. case 0:
  3954. gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
  3955. break;
  3956. case 1:
  3957. gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
  3958. break;
  3959. case 2:
  3960. gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
  3961. break;
  3962. default:
  3963. abort();
  3964. }
  3965. } else {
  3966. switch (size) {
  3967. case 0:
  3968. gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
  3969. break;
  3970. case 1:
  3971. gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
  3972. break;
  3973. default:
  3974. abort();
  3975. }
  3976. }
  3977. tcg_temp_free_i32(tmp);
  3978. tcg_temp_free_i32(tmp2);
  3979. return 0;
  3980. }
  3981. static int gen_neon_zip(int rd, int rm, int size, int q)
  3982. {
  3983. TCGv_i32 tmp, tmp2;
  3984. if (!q && size == 2) {
  3985. return 1;
  3986. }
  3987. tmp = tcg_const_i32(rd);
  3988. tmp2 = tcg_const_i32(rm);
  3989. if (q) {
  3990. switch (size) {
  3991. case 0:
  3992. gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
  3993. break;
  3994. case 1:
  3995. gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
  3996. break;
  3997. case 2:
  3998. gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
  3999. break;
  4000. default:
  4001. abort();
  4002. }
  4003. } else {
  4004. switch (size) {
  4005. case 0:
  4006. gen_helper_neon_zip8(cpu_env, tmp, tmp2);
  4007. break;
  4008. case 1:
  4009. gen_helper_neon_zip16(cpu_env, tmp, tmp2);
  4010. break;
  4011. default:
  4012. abort();
  4013. }
  4014. }
  4015. tcg_temp_free_i32(tmp);
  4016. tcg_temp_free_i32(tmp2);
  4017. return 0;
  4018. }
  4019. static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
  4020. {
  4021. TCGv_i32 rd, tmp;
  4022. rd = tcg_temp_new_i32();
  4023. tmp = tcg_temp_new_i32();
  4024. tcg_gen_shli_i32(rd, t0, 8);
  4025. tcg_gen_andi_i32(rd, rd, 0xff00ff00);
  4026. tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
  4027. tcg_gen_or_i32(rd, rd, tmp);
  4028. tcg_gen_shri_i32(t1, t1, 8);
  4029. tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
  4030. tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
  4031. tcg_gen_or_i32(t1, t1, tmp);
  4032. tcg_gen_mov_i32(t0, rd);
  4033. tcg_temp_free_i32(tmp);
  4034. tcg_temp_free_i32(rd);
  4035. }
  4036. static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
  4037. {
  4038. TCGv_i32 rd, tmp;
  4039. rd = tcg_temp_new_i32();
  4040. tmp = tcg_temp_new_i32();
  4041. tcg_gen_shli_i32(rd, t0, 16);
  4042. tcg_gen_andi_i32(tmp, t1, 0xffff);
  4043. tcg_gen_or_i32(rd, rd, tmp);
  4044. tcg_gen_shri_i32(t1, t1, 16);
  4045. tcg_gen_andi_i32(tmp, t0, 0xffff0000);
  4046. tcg_gen_or_i32(t1, t1, tmp);
  4047. tcg_gen_mov_i32(t0, rd);
  4048. tcg_temp_free_i32(tmp);
  4049. tcg_temp_free_i32(rd);
  4050. }
  4051. static struct {
  4052. int nregs;
  4053. int interleave;
  4054. int spacing;
  4055. } neon_ls_element_type[11] = {
  4056. {4, 4, 1},
  4057. {4, 4, 2},
  4058. {4, 1, 1},
  4059. {4, 2, 1},
  4060. {3, 3, 1},
  4061. {3, 3, 2},
  4062. {3, 1, 1},
  4063. {1, 1, 1},
  4064. {2, 2, 1},
  4065. {2, 2, 2},
  4066. {2, 1, 1}
  4067. };
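/* The table is indexed by the op field of a "load/store multiple
 * structures" insn: nregs is how many D registers are transferred,
 * interleave controls how far apart consecutive elements of one register
 * sit in memory (stride = (1 << size) * interleave below), and spacing is
 * the register-number step between transferred registers.  For example
 * entry 7, {1, 1, 1}, covers the simple one-register VLD1/VST1 form.
 */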
  4068. /* Translate a NEON load/store element instruction. Return nonzero if the
  4069. instruction is invalid. */
  4070. static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
  4071. {
  4072. int rd, rn, rm;
  4073. int op;
  4074. int nregs;
  4075. int interleave;
  4076. int spacing;
  4077. int stride;
  4078. int size;
  4079. int reg;
  4080. int pass;
  4081. int load;
  4082. int shift;
  4083. int n;
  4084. TCGv_i32 addr;
  4085. TCGv_i32 tmp;
  4086. TCGv_i32 tmp2;
  4087. TCGv_i64 tmp64;
  4088. /* FIXME: this access check should not take precedence over UNDEF
  4089. * for invalid encodings; we will generate incorrect syndrome information
  4090. * for attempts to execute invalid vfp/neon encodings with FP disabled.
  4091. */
  4092. if (s->fp_excp_el) {
  4093. gen_exception_insn(s, 4, EXCP_UDEF,
  4094. syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
  4095. return 0;
  4096. }
  4097. if (!s->vfp_enabled)
  4098. return 1;
  4099. VFP_DREG_D(rd, insn);
  4100. rn = (insn >> 16) & 0xf;
  4101. rm = insn & 0xf;
  4102. load = (insn & (1 << 21)) != 0;
  4103. if ((insn & (1 << 23)) == 0) {
  4104. /* Load store all elements. */
  4105. op = (insn >> 8) & 0xf;
  4106. size = (insn >> 6) & 3;
  4107. if (op > 10)
  4108. return 1;
  4109. /* Catch UNDEF cases for bad values of align field */
  4110. switch (op & 0xc) {
  4111. case 4:
  4112. if (((insn >> 5) & 1) == 1) {
  4113. return 1;
  4114. }
  4115. break;
  4116. case 8:
  4117. if (((insn >> 4) & 3) == 3) {
  4118. return 1;
  4119. }
  4120. break;
  4121. default:
  4122. break;
  4123. }
  4124. nregs = neon_ls_element_type[op].nregs;
  4125. interleave = neon_ls_element_type[op].interleave;
  4126. spacing = neon_ls_element_type[op].spacing;
  4127. if (size == 3 && (interleave | spacing) != 1)
  4128. return 1;
  4129. addr = tcg_temp_new_i32();
  4130. load_reg_var(s, addr, rn);
  4131. stride = (1 << size) * interleave;
  4132. for (reg = 0; reg < nregs; reg++) {
  4133. if (interleave > 2 || (interleave == 2 && nregs == 2)) {
  4134. load_reg_var(s, addr, rn);
  4135. tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
  4136. } else if (interleave == 2 && nregs == 4 && reg == 2) {
  4137. load_reg_var(s, addr, rn);
  4138. tcg_gen_addi_i32(addr, addr, 1 << size);
  4139. }
  4140. if (size == 3) {
  4141. tmp64 = tcg_temp_new_i64();
  4142. if (load) {
  4143. gen_aa32_ld64(tmp64, addr, get_mem_index(s));
  4144. neon_store_reg64(tmp64, rd);
  4145. } else {
  4146. neon_load_reg64(tmp64, rd);
  4147. gen_aa32_st64(tmp64, addr, get_mem_index(s));
  4148. }
  4149. tcg_temp_free_i64(tmp64);
  4150. tcg_gen_addi_i32(addr, addr, stride);
  4151. } else {
  4152. for (pass = 0; pass < 2; pass++) {
  4153. if (size == 2) {
  4154. if (load) {
  4155. tmp = tcg_temp_new_i32();
  4156. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  4157. neon_store_reg(rd, pass, tmp);
  4158. } else {
  4159. tmp = neon_load_reg(rd, pass);
  4160. gen_aa32_st32(tmp, addr, get_mem_index(s));
  4161. tcg_temp_free_i32(tmp);
  4162. }
  4163. tcg_gen_addi_i32(addr, addr, stride);
  4164. } else if (size == 1) {
  4165. if (load) {
  4166. tmp = tcg_temp_new_i32();
  4167. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  4168. tcg_gen_addi_i32(addr, addr, stride);
  4169. tmp2 = tcg_temp_new_i32();
  4170. gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
  4171. tcg_gen_addi_i32(addr, addr, stride);
  4172. tcg_gen_shli_i32(tmp2, tmp2, 16);
  4173. tcg_gen_or_i32(tmp, tmp, tmp2);
  4174. tcg_temp_free_i32(tmp2);
  4175. neon_store_reg(rd, pass, tmp);
  4176. } else {
  4177. tmp = neon_load_reg(rd, pass);
  4178. tmp2 = tcg_temp_new_i32();
  4179. tcg_gen_shri_i32(tmp2, tmp, 16);
  4180. gen_aa32_st16(tmp, addr, get_mem_index(s));
  4181. tcg_temp_free_i32(tmp);
  4182. tcg_gen_addi_i32(addr, addr, stride);
  4183. gen_aa32_st16(tmp2, addr, get_mem_index(s));
  4184. tcg_temp_free_i32(tmp2);
  4185. tcg_gen_addi_i32(addr, addr, stride);
  4186. }
  4187. } else /* size == 0 */ {
  4188. if (load) {
  4189. TCGV_UNUSED_I32(tmp2);
  4190. for (n = 0; n < 4; n++) {
  4191. tmp = tcg_temp_new_i32();
  4192. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  4193. tcg_gen_addi_i32(addr, addr, stride);
  4194. if (n == 0) {
  4195. tmp2 = tmp;
  4196. } else {
  4197. tcg_gen_shli_i32(tmp, tmp, n * 8);
  4198. tcg_gen_or_i32(tmp2, tmp2, tmp);
  4199. tcg_temp_free_i32(tmp);
  4200. }
  4201. }
  4202. neon_store_reg(rd, pass, tmp2);
  4203. } else {
  4204. tmp2 = neon_load_reg(rd, pass);
  4205. for (n = 0; n < 4; n++) {
  4206. tmp = tcg_temp_new_i32();
  4207. if (n == 0) {
  4208. tcg_gen_mov_i32(tmp, tmp2);
  4209. } else {
  4210. tcg_gen_shri_i32(tmp, tmp2, n * 8);
  4211. }
  4212. gen_aa32_st8(tmp, addr, get_mem_index(s));
  4213. tcg_temp_free_i32(tmp);
  4214. tcg_gen_addi_i32(addr, addr, stride);
  4215. }
  4216. tcg_temp_free_i32(tmp2);
  4217. }
  4218. }
  4219. }
  4220. }
  4221. rd += spacing;
  4222. }
  4223. tcg_temp_free_i32(addr);
  4224. stride = nregs * 8;
  4225. } else {
  4226. size = (insn >> 10) & 3;
  4227. if (size == 3) {
  4228. /* Load single element to all lanes. */
  4229. int a = (insn >> 4) & 1;
  4230. if (!load) {
  4231. return 1;
  4232. }
  4233. size = (insn >> 6) & 3;
  4234. nregs = ((insn >> 8) & 3) + 1;
  4235. if (size == 3) {
  4236. if (nregs != 4 || a == 0) {
  4237. return 1;
  4238. }
  4239. /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
  4240. size = 2;
  4241. }
  4242. if (nregs == 1 && a == 1 && size == 0) {
  4243. return 1;
  4244. }
  4245. if (nregs == 3 && a == 1) {
  4246. return 1;
  4247. }
  4248. addr = tcg_temp_new_i32();
  4249. load_reg_var(s, addr, rn);
  4250. if (nregs == 1) {
  4251. /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
  4252. tmp = gen_load_and_replicate(s, addr, size);
  4253. tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
  4254. tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
  4255. if (insn & (1 << 5)) {
  4256. tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
  4257. tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
  4258. }
  4259. tcg_temp_free_i32(tmp);
  4260. } else {
  4261. /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
  4262. stride = (insn & (1 << 5)) ? 2 : 1;
  4263. for (reg = 0; reg < nregs; reg++) {
  4264. tmp = gen_load_and_replicate(s, addr, size);
  4265. tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
  4266. tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
  4267. tcg_temp_free_i32(tmp);
  4268. tcg_gen_addi_i32(addr, addr, 1 << size);
  4269. rd += stride;
  4270. }
  4271. }
  4272. tcg_temp_free_i32(addr);
  4273. stride = (1 << size) * nregs;
  4274. } else {
  4275. /* Single element. */
  4276. int idx = (insn >> 4) & 0xf;
  4277. pass = (insn >> 7) & 1;
  4278. switch (size) {
  4279. case 0:
  4280. shift = ((insn >> 5) & 3) * 8;
  4281. stride = 1;
  4282. break;
  4283. case 1:
  4284. shift = ((insn >> 6) & 1) * 16;
  4285. stride = (insn & (1 << 5)) ? 2 : 1;
  4286. break;
  4287. case 2:
  4288. shift = 0;
  4289. stride = (insn & (1 << 6)) ? 2 : 1;
  4290. break;
  4291. default:
  4292. abort();
  4293. }
  4294. nregs = ((insn >> 8) & 3) + 1;
  4295. /* Catch the UNDEF cases. This is unavoidably a bit messy. */
  4296. switch (nregs) {
  4297. case 1:
  4298. if (((idx & (1 << size)) != 0) ||
  4299. (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
  4300. return 1;
  4301. }
  4302. break;
  4303. case 3:
  4304. if ((idx & 1) != 0) {
  4305. return 1;
  4306. }
  4307. /* fall through */
  4308. case 2:
  4309. if (size == 2 && (idx & 2) != 0) {
  4310. return 1;
  4311. }
  4312. break;
  4313. case 4:
  4314. if ((size == 2) && ((idx & 3) == 3)) {
  4315. return 1;
  4316. }
  4317. break;
  4318. default:
  4319. abort();
  4320. }
  4321. if ((rd + stride * (nregs - 1)) > 31) {
  4322. /* Attempts to write off the end of the register file
  4323. * are UNPREDICTABLE; we choose to UNDEF because otherwise
  4324. * the neon_load_reg() would write off the end of the array.
  4325. */
  4326. return 1;
  4327. }
  4328. addr = tcg_temp_new_i32();
  4329. load_reg_var(s, addr, rn);
  4330. for (reg = 0; reg < nregs; reg++) {
  4331. if (load) {
  4332. tmp = tcg_temp_new_i32();
  4333. switch (size) {
  4334. case 0:
  4335. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  4336. break;
  4337. case 1:
  4338. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  4339. break;
  4340. case 2:
  4341. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  4342. break;
  4343. default: /* Avoid compiler warnings. */
  4344. abort();
  4345. }
  4346. if (size != 2) {
  4347. tmp2 = neon_load_reg(rd, pass);
  4348. tcg_gen_deposit_i32(tmp, tmp2, tmp,
  4349. shift, size ? 16 : 8);
  4350. tcg_temp_free_i32(tmp2);
  4351. }
  4352. neon_store_reg(rd, pass, tmp);
  4353. } else { /* Store */
  4354. tmp = neon_load_reg(rd, pass);
  4355. if (shift)
  4356. tcg_gen_shri_i32(tmp, tmp, shift);
  4357. switch (size) {
  4358. case 0:
  4359. gen_aa32_st8(tmp, addr, get_mem_index(s));
  4360. break;
  4361. case 1:
  4362. gen_aa32_st16(tmp, addr, get_mem_index(s));
  4363. break;
  4364. case 2:
  4365. gen_aa32_st32(tmp, addr, get_mem_index(s));
  4366. break;
  4367. }
  4368. tcg_temp_free_i32(tmp);
  4369. }
  4370. rd += stride;
  4371. tcg_gen_addi_i32(addr, addr, 1 << size);
  4372. }
  4373. tcg_temp_free_i32(addr);
  4374. stride = nregs * (1 << size);
  4375. }
  4376. }
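/* Post-indexed addressing: rm == 15 means no writeback, rm == 13 means
 * post-index rn by the total number of bytes transferred (the final value
 * of "stride" above), and any other rm post-indexes rn by that register's
 * value.
 */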
  4377. if (rm != 15) {
  4378. TCGv_i32 base;
  4379. base = load_reg(s, rn);
  4380. if (rm == 13) {
  4381. tcg_gen_addi_i32(base, base, stride);
  4382. } else {
  4383. TCGv_i32 index;
  4384. index = load_reg(s, rm);
  4385. tcg_gen_add_i32(base, base, index);
  4386. tcg_temp_free_i32(index);
  4387. }
  4388. store_reg(s, rn, base);
  4389. }
  4390. return 0;
  4391. }
  4392. /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
  4393. static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
  4394. {
  4395. tcg_gen_and_i32(t, t, c);
  4396. tcg_gen_andc_i32(f, f, c);
  4397. tcg_gen_or_i32(dest, t, f);
  4398. }
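/* VBSL, VBIT and VBIF are all implemented with this one helper further
 * down, simply by permuting which of the three source registers is passed
 * as t, f and c (see the NEON_3R_LOGIC cases in disas_neon_data_insn).
 */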
  4399. static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
  4400. {
  4401. switch (size) {
  4402. case 0: gen_helper_neon_narrow_u8(dest, src); break;
  4403. case 1: gen_helper_neon_narrow_u16(dest, src); break;
  4404. case 2: tcg_gen_extrl_i64_i32(dest, src); break;
  4405. default: abort();
  4406. }
  4407. }
  4408. static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
  4409. {
  4410. switch (size) {
  4411. case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
  4412. case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
  4413. case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
  4414. default: abort();
  4415. }
  4416. }
  4417. static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
  4418. {
  4419. switch (size) {
  4420. case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
  4421. case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
  4422. case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
  4423. default: abort();
  4424. }
  4425. }
  4426. static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
  4427. {
  4428. switch (size) {
  4429. case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
  4430. case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
  4431. case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
  4432. default: abort();
  4433. }
  4434. }
  4435. static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
  4436. int q, int u)
  4437. {
  4438. if (q) {
  4439. if (u) {
  4440. switch (size) {
  4441. case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
  4442. case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
  4443. default: abort();
  4444. }
  4445. } else {
  4446. switch (size) {
  4447. case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
  4448. case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
  4449. default: abort();
  4450. }
  4451. }
  4452. } else {
  4453. if (u) {
  4454. switch (size) {
  4455. case 1: gen_helper_neon_shl_u16(var, var, shift); break;
  4456. case 2: gen_helper_neon_shl_u32(var, var, shift); break;
  4457. default: abort();
  4458. }
  4459. } else {
  4460. switch (size) {
  4461. case 1: gen_helper_neon_shl_s16(var, var, shift); break;
  4462. case 2: gen_helper_neon_shl_s32(var, var, shift); break;
  4463. default: abort();
  4464. }
  4465. }
  4466. }
  4467. }
  4468. static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
  4469. {
  4470. if (u) {
  4471. switch (size) {
  4472. case 0: gen_helper_neon_widen_u8(dest, src); break;
  4473. case 1: gen_helper_neon_widen_u16(dest, src); break;
  4474. case 2: tcg_gen_extu_i32_i64(dest, src); break;
  4475. default: abort();
  4476. }
  4477. } else {
  4478. switch (size) {
  4479. case 0: gen_helper_neon_widen_s8(dest, src); break;
  4480. case 1: gen_helper_neon_widen_s16(dest, src); break;
  4481. case 2: tcg_gen_ext_i32_i64(dest, src); break;
  4482. default: abort();
  4483. }
  4484. }
  4485. tcg_temp_free_i32(src);
  4486. }
  4487. static inline void gen_neon_addl(int size)
  4488. {
  4489. switch (size) {
  4490. case 0: gen_helper_neon_addl_u16(CPU_V001); break;
  4491. case 1: gen_helper_neon_addl_u32(CPU_V001); break;
  4492. case 2: tcg_gen_add_i64(CPU_V001); break;
  4493. default: abort();
  4494. }
  4495. }
  4496. static inline void gen_neon_subl(int size)
  4497. {
  4498. switch (size) {
  4499. case 0: gen_helper_neon_subl_u16(CPU_V001); break;
  4500. case 1: gen_helper_neon_subl_u32(CPU_V001); break;
  4501. case 2: tcg_gen_sub_i64(CPU_V001); break;
  4502. default: abort();
  4503. }
  4504. }
  4505. static inline void gen_neon_negl(TCGv_i64 var, int size)
  4506. {
  4507. switch (size) {
  4508. case 0: gen_helper_neon_negl_u16(var, var); break;
  4509. case 1: gen_helper_neon_negl_u32(var, var); break;
  4510. case 2:
  4511. tcg_gen_neg_i64(var, var);
  4512. break;
  4513. default: abort();
  4514. }
  4515. }
  4516. static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
  4517. {
  4518. switch (size) {
  4519. case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
  4520. case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
  4521. default: abort();
  4522. }
  4523. }
  4524. static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
  4525. int size, int u)
  4526. {
  4527. TCGv_i64 tmp;
  4528. switch ((size << 1) | u) {
  4529. case 0: gen_helper_neon_mull_s8(dest, a, b); break;
  4530. case 1: gen_helper_neon_mull_u8(dest, a, b); break;
  4531. case 2: gen_helper_neon_mull_s16(dest, a, b); break;
  4532. case 3: gen_helper_neon_mull_u16(dest, a, b); break;
  4533. case 4:
  4534. tmp = gen_muls_i64_i32(a, b);
  4535. tcg_gen_mov_i64(dest, tmp);
  4536. tcg_temp_free_i64(tmp);
  4537. break;
  4538. case 5:
  4539. tmp = gen_mulu_i64_i32(a, b);
  4540. tcg_gen_mov_i64(dest, tmp);
  4541. tcg_temp_free_i64(tmp);
  4542. break;
  4543. default: abort();
  4544. }
4545. /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4546. so free them here. */
  4547. if (size < 2) {
  4548. tcg_temp_free_i32(a);
  4549. tcg_temp_free_i32(b);
  4550. }
  4551. }
  4552. static void gen_neon_narrow_op(int op, int u, int size,
  4553. TCGv_i32 dest, TCGv_i64 src)
  4554. {
  4555. if (op) {
  4556. if (u) {
  4557. gen_neon_unarrow_sats(size, dest, src);
  4558. } else {
  4559. gen_neon_narrow(size, dest, src);
  4560. }
  4561. } else {
  4562. if (u) {
  4563. gen_neon_narrow_satu(size, dest, src);
  4564. } else {
  4565. gen_neon_narrow_sats(size, dest, src);
  4566. }
  4567. }
  4568. }
  4569. /* Symbolic constants for op fields for Neon 3-register same-length.
  4570. * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
  4571. * table A7-9.
  4572. */
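/* As a concrete decode example: disas_neon_data_insn() below computes
 * op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1), so an insn with
 * bits [11:8] = 0b1000 and bit 4 = 0 gives op = 16, NEON_3R_VADD_VSUB.
 */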
  4573. #define NEON_3R_VHADD 0
  4574. #define NEON_3R_VQADD 1
  4575. #define NEON_3R_VRHADD 2
  4576. #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
  4577. #define NEON_3R_VHSUB 4
  4578. #define NEON_3R_VQSUB 5
  4579. #define NEON_3R_VCGT 6
  4580. #define NEON_3R_VCGE 7
  4581. #define NEON_3R_VSHL 8
  4582. #define NEON_3R_VQSHL 9
  4583. #define NEON_3R_VRSHL 10
  4584. #define NEON_3R_VQRSHL 11
  4585. #define NEON_3R_VMAX 12
  4586. #define NEON_3R_VMIN 13
  4587. #define NEON_3R_VABD 14
  4588. #define NEON_3R_VABA 15
  4589. #define NEON_3R_VADD_VSUB 16
  4590. #define NEON_3R_VTST_VCEQ 17
  4591. #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
  4592. #define NEON_3R_VMUL 19
  4593. #define NEON_3R_VPMAX 20
  4594. #define NEON_3R_VPMIN 21
  4595. #define NEON_3R_VQDMULH_VQRDMULH 22
  4596. #define NEON_3R_VPADD 23
  4597. #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
  4598. #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
  4599. #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
  4600. #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
  4601. #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
  4602. #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
  4603. #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
  4604. #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
  4605. static const uint8_t neon_3r_sizes[] = {
  4606. [NEON_3R_VHADD] = 0x7,
  4607. [NEON_3R_VQADD] = 0xf,
  4608. [NEON_3R_VRHADD] = 0x7,
  4609. [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
  4610. [NEON_3R_VHSUB] = 0x7,
  4611. [NEON_3R_VQSUB] = 0xf,
  4612. [NEON_3R_VCGT] = 0x7,
  4613. [NEON_3R_VCGE] = 0x7,
  4614. [NEON_3R_VSHL] = 0xf,
  4615. [NEON_3R_VQSHL] = 0xf,
  4616. [NEON_3R_VRSHL] = 0xf,
  4617. [NEON_3R_VQRSHL] = 0xf,
  4618. [NEON_3R_VMAX] = 0x7,
  4619. [NEON_3R_VMIN] = 0x7,
  4620. [NEON_3R_VABD] = 0x7,
  4621. [NEON_3R_VABA] = 0x7,
  4622. [NEON_3R_VADD_VSUB] = 0xf,
  4623. [NEON_3R_VTST_VCEQ] = 0x7,
  4624. [NEON_3R_VML] = 0x7,
  4625. [NEON_3R_VMUL] = 0x7,
  4626. [NEON_3R_VPMAX] = 0x7,
  4627. [NEON_3R_VPMIN] = 0x7,
  4628. [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
  4629. [NEON_3R_VPADD] = 0x7,
  4630. [NEON_3R_SHA] = 0xf, /* size field encodes op type */
  4631. [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
  4632. [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
  4633. [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
  4634. [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
  4635. [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
  4636. [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
  4637. [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
  4638. };
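/* As with neon_2rm_sizes below, bit n of each entry means "size value n is
 * allowed": 0x7 permits the 8/16/32-bit element sizes, 0xf additionally
 * permits the 64-bit (size == 3) forms, and 0x6 (VQDMULH/VQRDMULH) only
 * permits 16- and 32-bit elements.
 */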
  4639. /* Symbolic constants for op fields for Neon 2-register miscellaneous.
  4640. * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
  4641. * table A7-13.
  4642. */
  4643. #define NEON_2RM_VREV64 0
  4644. #define NEON_2RM_VREV32 1
  4645. #define NEON_2RM_VREV16 2
  4646. #define NEON_2RM_VPADDL 4
  4647. #define NEON_2RM_VPADDL_U 5
  4648. #define NEON_2RM_AESE 6 /* Includes AESD */
  4649. #define NEON_2RM_AESMC 7 /* Includes AESIMC */
  4650. #define NEON_2RM_VCLS 8
  4651. #define NEON_2RM_VCLZ 9
  4652. #define NEON_2RM_VCNT 10
  4653. #define NEON_2RM_VMVN 11
  4654. #define NEON_2RM_VPADAL 12
  4655. #define NEON_2RM_VPADAL_U 13
  4656. #define NEON_2RM_VQABS 14
  4657. #define NEON_2RM_VQNEG 15
  4658. #define NEON_2RM_VCGT0 16
  4659. #define NEON_2RM_VCGE0 17
  4660. #define NEON_2RM_VCEQ0 18
  4661. #define NEON_2RM_VCLE0 19
  4662. #define NEON_2RM_VCLT0 20
  4663. #define NEON_2RM_SHA1H 21
  4664. #define NEON_2RM_VABS 22
  4665. #define NEON_2RM_VNEG 23
  4666. #define NEON_2RM_VCGT0_F 24
  4667. #define NEON_2RM_VCGE0_F 25
  4668. #define NEON_2RM_VCEQ0_F 26
  4669. #define NEON_2RM_VCLE0_F 27
  4670. #define NEON_2RM_VCLT0_F 28
  4671. #define NEON_2RM_VABS_F 30
  4672. #define NEON_2RM_VNEG_F 31
  4673. #define NEON_2RM_VSWP 32
  4674. #define NEON_2RM_VTRN 33
  4675. #define NEON_2RM_VUZP 34
  4676. #define NEON_2RM_VZIP 35
  4677. #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
  4678. #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
  4679. #define NEON_2RM_VSHLL 38
  4680. #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
  4681. #define NEON_2RM_VRINTN 40
  4682. #define NEON_2RM_VRINTX 41
  4683. #define NEON_2RM_VRINTA 42
  4684. #define NEON_2RM_VRINTZ 43
  4685. #define NEON_2RM_VCVT_F16_F32 44
  4686. #define NEON_2RM_VRINTM 45
  4687. #define NEON_2RM_VCVT_F32_F16 46
  4688. #define NEON_2RM_VRINTP 47
  4689. #define NEON_2RM_VCVTAU 48
  4690. #define NEON_2RM_VCVTAS 49
  4691. #define NEON_2RM_VCVTNU 50
  4692. #define NEON_2RM_VCVTNS 51
  4693. #define NEON_2RM_VCVTPU 52
  4694. #define NEON_2RM_VCVTPS 53
  4695. #define NEON_2RM_VCVTMU 54
  4696. #define NEON_2RM_VCVTMS 55
  4697. #define NEON_2RM_VRECPE 56
  4698. #define NEON_2RM_VRSQRTE 57
  4699. #define NEON_2RM_VRECPE_F 58
  4700. #define NEON_2RM_VRSQRTE_F 59
  4701. #define NEON_2RM_VCVT_FS 60
  4702. #define NEON_2RM_VCVT_FU 61
  4703. #define NEON_2RM_VCVT_SF 62
  4704. #define NEON_2RM_VCVT_UF 63
  4705. static int neon_2rm_is_float_op(int op)
  4706. {
  4707. /* Return true if this neon 2reg-misc op is float-to-float */
  4708. return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
  4709. (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
  4710. op == NEON_2RM_VRINTM ||
  4711. (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
  4712. op >= NEON_2RM_VRECPE_F);
  4713. }
  4714. /* Each entry in this array has bit n set if the insn allows
  4715. * size value n (otherwise it will UNDEF). Since unallocated
  4716. * op values will have no bits set they always UNDEF.
  4717. */
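/* For example 0x7 allows size values 0-2 (8/16/32-bit elements), 0x4
 * allows only size 2, and 0x1 allows only size 0.
 */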
  4718. static const uint8_t neon_2rm_sizes[] = {
  4719. [NEON_2RM_VREV64] = 0x7,
  4720. [NEON_2RM_VREV32] = 0x3,
  4721. [NEON_2RM_VREV16] = 0x1,
  4722. [NEON_2RM_VPADDL] = 0x7,
  4723. [NEON_2RM_VPADDL_U] = 0x7,
  4724. [NEON_2RM_AESE] = 0x1,
  4725. [NEON_2RM_AESMC] = 0x1,
  4726. [NEON_2RM_VCLS] = 0x7,
  4727. [NEON_2RM_VCLZ] = 0x7,
  4728. [NEON_2RM_VCNT] = 0x1,
  4729. [NEON_2RM_VMVN] = 0x1,
  4730. [NEON_2RM_VPADAL] = 0x7,
  4731. [NEON_2RM_VPADAL_U] = 0x7,
  4732. [NEON_2RM_VQABS] = 0x7,
  4733. [NEON_2RM_VQNEG] = 0x7,
  4734. [NEON_2RM_VCGT0] = 0x7,
  4735. [NEON_2RM_VCGE0] = 0x7,
  4736. [NEON_2RM_VCEQ0] = 0x7,
  4737. [NEON_2RM_VCLE0] = 0x7,
  4738. [NEON_2RM_VCLT0] = 0x7,
  4739. [NEON_2RM_SHA1H] = 0x4,
  4740. [NEON_2RM_VABS] = 0x7,
  4741. [NEON_2RM_VNEG] = 0x7,
  4742. [NEON_2RM_VCGT0_F] = 0x4,
  4743. [NEON_2RM_VCGE0_F] = 0x4,
  4744. [NEON_2RM_VCEQ0_F] = 0x4,
  4745. [NEON_2RM_VCLE0_F] = 0x4,
  4746. [NEON_2RM_VCLT0_F] = 0x4,
  4747. [NEON_2RM_VABS_F] = 0x4,
  4748. [NEON_2RM_VNEG_F] = 0x4,
  4749. [NEON_2RM_VSWP] = 0x1,
  4750. [NEON_2RM_VTRN] = 0x7,
  4751. [NEON_2RM_VUZP] = 0x7,
  4752. [NEON_2RM_VZIP] = 0x7,
  4753. [NEON_2RM_VMOVN] = 0x7,
  4754. [NEON_2RM_VQMOVN] = 0x7,
  4755. [NEON_2RM_VSHLL] = 0x7,
  4756. [NEON_2RM_SHA1SU1] = 0x4,
  4757. [NEON_2RM_VRINTN] = 0x4,
  4758. [NEON_2RM_VRINTX] = 0x4,
  4759. [NEON_2RM_VRINTA] = 0x4,
  4760. [NEON_2RM_VRINTZ] = 0x4,
  4761. [NEON_2RM_VCVT_F16_F32] = 0x2,
  4762. [NEON_2RM_VRINTM] = 0x4,
  4763. [NEON_2RM_VCVT_F32_F16] = 0x2,
  4764. [NEON_2RM_VRINTP] = 0x4,
  4765. [NEON_2RM_VCVTAU] = 0x4,
  4766. [NEON_2RM_VCVTAS] = 0x4,
  4767. [NEON_2RM_VCVTNU] = 0x4,
  4768. [NEON_2RM_VCVTNS] = 0x4,
  4769. [NEON_2RM_VCVTPU] = 0x4,
  4770. [NEON_2RM_VCVTPS] = 0x4,
  4771. [NEON_2RM_VCVTMU] = 0x4,
  4772. [NEON_2RM_VCVTMS] = 0x4,
  4773. [NEON_2RM_VRECPE] = 0x4,
  4774. [NEON_2RM_VRSQRTE] = 0x4,
  4775. [NEON_2RM_VRECPE_F] = 0x4,
  4776. [NEON_2RM_VRSQRTE_F] = 0x4,
  4777. [NEON_2RM_VCVT_FS] = 0x4,
  4778. [NEON_2RM_VCVT_FU] = 0x4,
  4779. [NEON_2RM_VCVT_SF] = 0x4,
  4780. [NEON_2RM_VCVT_UF] = 0x4,
  4781. };
  4782. /* Translate a NEON data processing instruction. Return nonzero if the
  4783. instruction is invalid.
  4784. We process data in a mixture of 32-bit and 64-bit chunks.
  4785. Mostly we use 32-bit chunks so we can use normal scalar instructions. */
  4786. static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
  4787. {
  4788. int op;
  4789. int q;
  4790. int rd, rn, rm;
  4791. int size;
  4792. int shift;
  4793. int pass;
  4794. int count;
  4795. int pairwise;
  4796. int u;
  4797. uint32_t imm, mask;
  4798. TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
  4799. TCGv_i64 tmp64;
  4800. /* FIXME: this access check should not take precedence over UNDEF
  4801. * for invalid encodings; we will generate incorrect syndrome information
  4802. * for attempts to execute invalid vfp/neon encodings with FP disabled.
  4803. */
  4804. if (s->fp_excp_el) {
  4805. gen_exception_insn(s, 4, EXCP_UDEF,
  4806. syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
  4807. return 0;
  4808. }
  4809. if (!s->vfp_enabled)
  4810. return 1;
  4811. q = (insn & (1 << 6)) != 0;
  4812. u = (insn >> 24) & 1;
  4813. VFP_DREG_D(rd, insn);
  4814. VFP_DREG_N(rn, insn);
  4815. VFP_DREG_M(rm, insn);
  4816. size = (insn >> 20) & 3;
  4817. if ((insn & (1 << 23)) == 0) {
  4818. /* Three register same length. */
  4819. op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
  4820. /* Catch invalid op and bad size combinations: UNDEF */
  4821. if ((neon_3r_sizes[op] & (1 << size)) == 0) {
  4822. return 1;
  4823. }
  4824. /* All insns of this form UNDEF for either this condition or the
  4825. * superset of cases "Q==1"; we catch the latter later.
  4826. */
  4827. if (q && ((rd | rn | rm) & 1)) {
  4828. return 1;
  4829. }
  4830. /*
  4831. * The SHA-1/SHA-256 3-register instructions require special treatment
  4832. * here, as their size field is overloaded as an op type selector, and
  4833. * they all consume their input in a single pass.
  4834. */
  4835. if (op == NEON_3R_SHA) {
  4836. if (!q) {
  4837. return 1;
  4838. }
  4839. if (!u) { /* SHA-1 */
  4840. if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
  4841. return 1;
  4842. }
  4843. tmp = tcg_const_i32(rd);
  4844. tmp2 = tcg_const_i32(rn);
  4845. tmp3 = tcg_const_i32(rm);
  4846. tmp4 = tcg_const_i32(size);
  4847. gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
  4848. tcg_temp_free_i32(tmp4);
  4849. } else { /* SHA-256 */
  4850. if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
  4851. return 1;
  4852. }
  4853. tmp = tcg_const_i32(rd);
  4854. tmp2 = tcg_const_i32(rn);
  4855. tmp3 = tcg_const_i32(rm);
  4856. switch (size) {
  4857. case 0:
  4858. gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
  4859. break;
  4860. case 1:
  4861. gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
  4862. break;
  4863. case 2:
  4864. gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
  4865. break;
  4866. }
  4867. }
  4868. tcg_temp_free_i32(tmp);
  4869. tcg_temp_free_i32(tmp2);
  4870. tcg_temp_free_i32(tmp3);
  4871. return 0;
  4872. }
  4873. if (size == 3 && op != NEON_3R_LOGIC) {
  4874. /* 64-bit element instructions. */
  4875. for (pass = 0; pass < (q ? 2 : 1); pass++) {
  4876. neon_load_reg64(cpu_V0, rn + pass);
  4877. neon_load_reg64(cpu_V1, rm + pass);
  4878. switch (op) {
  4879. case NEON_3R_VQADD:
  4880. if (u) {
  4881. gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
  4882. cpu_V0, cpu_V1);
  4883. } else {
  4884. gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
  4885. cpu_V0, cpu_V1);
  4886. }
  4887. break;
  4888. case NEON_3R_VQSUB:
  4889. if (u) {
  4890. gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
  4891. cpu_V0, cpu_V1);
  4892. } else {
  4893. gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
  4894. cpu_V0, cpu_V1);
  4895. }
  4896. break;
  4897. case NEON_3R_VSHL:
  4898. if (u) {
  4899. gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
  4900. } else {
  4901. gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
  4902. }
  4903. break;
  4904. case NEON_3R_VQSHL:
  4905. if (u) {
  4906. gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
  4907. cpu_V1, cpu_V0);
  4908. } else {
  4909. gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
  4910. cpu_V1, cpu_V0);
  4911. }
  4912. break;
  4913. case NEON_3R_VRSHL:
  4914. if (u) {
  4915. gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
  4916. } else {
  4917. gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
  4918. }
  4919. break;
  4920. case NEON_3R_VQRSHL:
  4921. if (u) {
  4922. gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
  4923. cpu_V1, cpu_V0);
  4924. } else {
  4925. gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
  4926. cpu_V1, cpu_V0);
  4927. }
  4928. break;
  4929. case NEON_3R_VADD_VSUB:
  4930. if (u) {
  4931. tcg_gen_sub_i64(CPU_V001);
  4932. } else {
  4933. tcg_gen_add_i64(CPU_V001);
  4934. }
  4935. break;
  4936. default:
  4937. abort();
  4938. }
  4939. neon_store_reg64(cpu_V0, rd + pass);
  4940. }
  4941. return 0;
  4942. }
  4943. pairwise = 0;
  4944. switch (op) {
  4945. case NEON_3R_VSHL:
  4946. case NEON_3R_VQSHL:
  4947. case NEON_3R_VRSHL:
  4948. case NEON_3R_VQRSHL:
  4949. {
  4950. int rtmp;
  4951. /* Shift instruction operands are reversed. */
  4952. rtmp = rn;
  4953. rn = rm;
  4954. rm = rtmp;
  4955. }
  4956. break;
  4957. case NEON_3R_VPADD:
  4958. if (u) {
  4959. return 1;
  4960. }
  4961. /* Fall through */
  4962. case NEON_3R_VPMAX:
  4963. case NEON_3R_VPMIN:
  4964. pairwise = 1;
  4965. break;
  4966. case NEON_3R_FLOAT_ARITH:
  4967. pairwise = (u && size < 2); /* if VPADD (float) */
  4968. break;
  4969. case NEON_3R_FLOAT_MINMAX:
  4970. pairwise = u; /* if VPMIN/VPMAX (float) */
  4971. break;
  4972. case NEON_3R_FLOAT_CMP:
  4973. if (!u && size) {
  4974. /* no encoding for U=0 C=1x */
  4975. return 1;
  4976. }
  4977. break;
  4978. case NEON_3R_FLOAT_ACMP:
  4979. if (!u) {
  4980. return 1;
  4981. }
  4982. break;
  4983. case NEON_3R_FLOAT_MISC:
  4984. /* VMAXNM/VMINNM in ARMv8 */
  4985. if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
  4986. return 1;
  4987. }
  4988. break;
  4989. case NEON_3R_VMUL:
  4990. if (u && (size != 0)) {
  4991. /* UNDEF on invalid size for polynomial subcase */
  4992. return 1;
  4993. }
  4994. break;
  4995. case NEON_3R_VFM:
  4996. if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
  4997. return 1;
  4998. }
  4999. break;
  5000. default:
  5001. break;
  5002. }
  5003. if (pairwise && q) {
  5004. /* All the pairwise insns UNDEF if Q is set */
  5005. return 1;
  5006. }
  5007. for (pass = 0; pass < (q ? 4 : 2); pass++) {
  5008. if (pairwise) {
  5009. /* Pairwise. */
  5010. if (pass < 1) {
  5011. tmp = neon_load_reg(rn, 0);
  5012. tmp2 = neon_load_reg(rn, 1);
  5013. } else {
  5014. tmp = neon_load_reg(rm, 0);
  5015. tmp2 = neon_load_reg(rm, 1);
  5016. }
  5017. } else {
  5018. /* Elementwise. */
  5019. tmp = neon_load_reg(rn, pass);
  5020. tmp2 = neon_load_reg(rm, pass);
  5021. }
  5022. switch (op) {
  5023. case NEON_3R_VHADD:
  5024. GEN_NEON_INTEGER_OP(hadd);
  5025. break;
  5026. case NEON_3R_VQADD:
  5027. GEN_NEON_INTEGER_OP_ENV(qadd);
  5028. break;
  5029. case NEON_3R_VRHADD:
  5030. GEN_NEON_INTEGER_OP(rhadd);
  5031. break;
  5032. case NEON_3R_LOGIC: /* Logic ops. */
  5033. switch ((u << 2) | size) {
  5034. case 0: /* VAND */
  5035. tcg_gen_and_i32(tmp, tmp, tmp2);
  5036. break;
  5037. case 1: /* BIC */
  5038. tcg_gen_andc_i32(tmp, tmp, tmp2);
  5039. break;
  5040. case 2: /* VORR */
  5041. tcg_gen_or_i32(tmp, tmp, tmp2);
  5042. break;
  5043. case 3: /* VORN */
  5044. tcg_gen_orc_i32(tmp, tmp, tmp2);
  5045. break;
  5046. case 4: /* VEOR */
  5047. tcg_gen_xor_i32(tmp, tmp, tmp2);
  5048. break;
  5049. case 5: /* VBSL */
  5050. tmp3 = neon_load_reg(rd, pass);
  5051. gen_neon_bsl(tmp, tmp, tmp2, tmp3);
  5052. tcg_temp_free_i32(tmp3);
  5053. break;
  5054. case 6: /* VBIT */
  5055. tmp3 = neon_load_reg(rd, pass);
  5056. gen_neon_bsl(tmp, tmp, tmp3, tmp2);
  5057. tcg_temp_free_i32(tmp3);
  5058. break;
  5059. case 7: /* VBIF */
  5060. tmp3 = neon_load_reg(rd, pass);
  5061. gen_neon_bsl(tmp, tmp3, tmp, tmp2);
  5062. tcg_temp_free_i32(tmp3);
  5063. break;
  5064. }
  5065. break;
  5066. case NEON_3R_VHSUB:
  5067. GEN_NEON_INTEGER_OP(hsub);
  5068. break;
  5069. case NEON_3R_VQSUB:
  5070. GEN_NEON_INTEGER_OP_ENV(qsub);
  5071. break;
  5072. case NEON_3R_VCGT:
  5073. GEN_NEON_INTEGER_OP(cgt);
  5074. break;
  5075. case NEON_3R_VCGE:
  5076. GEN_NEON_INTEGER_OP(cge);
  5077. break;
  5078. case NEON_3R_VSHL:
  5079. GEN_NEON_INTEGER_OP(shl);
  5080. break;
  5081. case NEON_3R_VQSHL:
  5082. GEN_NEON_INTEGER_OP_ENV(qshl);
  5083. break;
  5084. case NEON_3R_VRSHL:
  5085. GEN_NEON_INTEGER_OP(rshl);
  5086. break;
  5087. case NEON_3R_VQRSHL:
  5088. GEN_NEON_INTEGER_OP_ENV(qrshl);
  5089. break;
  5090. case NEON_3R_VMAX:
  5091. GEN_NEON_INTEGER_OP(max);
  5092. break;
  5093. case NEON_3R_VMIN:
  5094. GEN_NEON_INTEGER_OP(min);
  5095. break;
  5096. case NEON_3R_VABD:
  5097. GEN_NEON_INTEGER_OP(abd);
  5098. break;
  5099. case NEON_3R_VABA:
  5100. GEN_NEON_INTEGER_OP(abd);
  5101. tcg_temp_free_i32(tmp2);
  5102. tmp2 = neon_load_reg(rd, pass);
  5103. gen_neon_add(size, tmp, tmp2);
  5104. break;
  5105. case NEON_3R_VADD_VSUB:
  5106. if (!u) { /* VADD */
  5107. gen_neon_add(size, tmp, tmp2);
  5108. } else { /* VSUB */
  5109. switch (size) {
  5110. case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
  5111. case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
  5112. case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
  5113. default: abort();
  5114. }
  5115. }
  5116. break;
  5117. case NEON_3R_VTST_VCEQ:
  5118. if (!u) { /* VTST */
  5119. switch (size) {
  5120. case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
  5121. case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
  5122. case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
  5123. default: abort();
  5124. }
  5125. } else { /* VCEQ */
  5126. switch (size) {
  5127. case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
  5128. case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
  5129. case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
  5130. default: abort();
  5131. }
  5132. }
  5133. break;
5134. case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
  5135. switch (size) {
  5136. case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
  5137. case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
  5138. case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
  5139. default: abort();
  5140. }
  5141. tcg_temp_free_i32(tmp2);
  5142. tmp2 = neon_load_reg(rd, pass);
  5143. if (u) { /* VMLS */
  5144. gen_neon_rsb(size, tmp, tmp2);
  5145. } else { /* VMLA */
  5146. gen_neon_add(size, tmp, tmp2);
  5147. }
  5148. break;
  5149. case NEON_3R_VMUL:
  5150. if (u) { /* polynomial */
  5151. gen_helper_neon_mul_p8(tmp, tmp, tmp2);
  5152. } else { /* Integer */
  5153. switch (size) {
  5154. case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
  5155. case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
  5156. case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
  5157. default: abort();
  5158. }
  5159. }
  5160. break;
  5161. case NEON_3R_VPMAX:
  5162. GEN_NEON_INTEGER_OP(pmax);
  5163. break;
  5164. case NEON_3R_VPMIN:
  5165. GEN_NEON_INTEGER_OP(pmin);
  5166. break;
  5167. case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
  5168. if (!u) { /* VQDMULH */
  5169. switch (size) {
  5170. case 1:
  5171. gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
  5172. break;
  5173. case 2:
  5174. gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
  5175. break;
  5176. default: abort();
  5177. }
  5178. } else { /* VQRDMULH */
  5179. switch (size) {
  5180. case 1:
  5181. gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
  5182. break;
  5183. case 2:
  5184. gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
  5185. break;
  5186. default: abort();
  5187. }
  5188. }
  5189. break;
  5190. case NEON_3R_VPADD:
  5191. switch (size) {
  5192. case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
  5193. case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
  5194. case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
  5195. default: abort();
  5196. }
  5197. break;
  5198. case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
  5199. {
  5200. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5201. switch ((u << 2) | size) {
  5202. case 0: /* VADD */
  5203. case 4: /* VPADD */
  5204. gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
  5205. break;
  5206. case 2: /* VSUB */
  5207. gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
  5208. break;
  5209. case 6: /* VABD */
  5210. gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
  5211. break;
  5212. default:
  5213. abort();
  5214. }
  5215. tcg_temp_free_ptr(fpstatus);
  5216. break;
  5217. }
  5218. case NEON_3R_FLOAT_MULTIPLY:
  5219. {
  5220. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5221. gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
  5222. if (!u) {
  5223. tcg_temp_free_i32(tmp2);
  5224. tmp2 = neon_load_reg(rd, pass);
  5225. if (size == 0) {
  5226. gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
  5227. } else {
  5228. gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
  5229. }
  5230. }
  5231. tcg_temp_free_ptr(fpstatus);
  5232. break;
  5233. }
  5234. case NEON_3R_FLOAT_CMP:
  5235. {
  5236. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5237. if (!u) {
  5238. gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
  5239. } else {
  5240. if (size == 0) {
  5241. gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
  5242. } else {
  5243. gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
  5244. }
  5245. }
  5246. tcg_temp_free_ptr(fpstatus);
  5247. break;
  5248. }
  5249. case NEON_3R_FLOAT_ACMP:
  5250. {
  5251. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5252. if (size == 0) {
  5253. gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
  5254. } else {
  5255. gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
  5256. }
  5257. tcg_temp_free_ptr(fpstatus);
  5258. break;
  5259. }
  5260. case NEON_3R_FLOAT_MINMAX:
  5261. {
  5262. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5263. if (size == 0) {
  5264. gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
  5265. } else {
  5266. gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
  5267. }
  5268. tcg_temp_free_ptr(fpstatus);
  5269. break;
  5270. }
  5271. case NEON_3R_FLOAT_MISC:
  5272. if (u) {
  5273. /* VMAXNM/VMINNM */
  5274. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5275. if (size == 0) {
  5276. gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
  5277. } else {
  5278. gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
  5279. }
  5280. tcg_temp_free_ptr(fpstatus);
  5281. } else {
  5282. if (size == 0) {
  5283. gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
  5284. } else {
  5285. gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
  5286. }
  5287. }
  5288. break;
  5289. case NEON_3R_VFM:
  5290. {
  5291. /* VFMA, VFMS: fused multiply-add */
  5292. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  5293. TCGv_i32 tmp3 = neon_load_reg(rd, pass);
  5294. if (size) {
  5295. /* VFMS */
  5296. gen_helper_vfp_negs(tmp, tmp);
  5297. }
  5298. gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
  5299. tcg_temp_free_i32(tmp3);
  5300. tcg_temp_free_ptr(fpstatus);
  5301. break;
  5302. }
  5303. default:
  5304. abort();
  5305. }
  5306. tcg_temp_free_i32(tmp2);
  5307. /* Save the result. For elementwise operations we can put it
  5308. straight into the destination register. For pairwise operations
  5309. we have to be careful to avoid clobbering the source operands. */
  5310. if (pairwise && rd == rm) {
  5311. neon_store_scratch(pass, tmp);
  5312. } else {
  5313. neon_store_reg(rd, pass, tmp);
  5314. }
  5315. } /* for pass */
  5316. if (pairwise && rd == rm) {
  5317. for (pass = 0; pass < (q ? 4 : 2); pass++) {
  5318. tmp = neon_load_scratch(pass);
  5319. neon_store_reg(rd, pass, tmp);
  5320. }
  5321. }
  5322. /* End of 3 register same size operations. */
  5323. } else if (insn & (1 << 4)) {
  5324. if ((insn & 0x00380080) != 0) {
  5325. /* Two registers and shift. */
  5326. op = (insn >> 8) & 0xf;
  5327. if (insn & (1 << 7)) {
  5328. /* 64-bit shift. */
  5329. if (op > 7) {
  5330. return 1;
  5331. }
  5332. size = 3;
  5333. } else {
  5334. size = 2;
  5335. while ((insn & (1 << (size + 19))) == 0)
  5336. size--;
  5337. }
  5338. shift = (insn >> 16) & ((1 << (3 + size)) - 1);
  5339. /* To avoid excessive duplication of ops we implement shift
  5340. by immediate using the variable shift operations. */
  5341. if (op < 8) {
  5342. /* Shift by immediate:
  5343. VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
  5344. if (q && ((rd | rm) & 1)) {
  5345. return 1;
  5346. }
  5347. if (!u && (op == 4 || op == 6)) {
  5348. return 1;
  5349. }
  5350. /* Right shifts are encoded as N - shift, where N is the
  5351. element size in bits. */
  5352. if (op <= 4)
  5353. shift = shift - (1 << (size + 3));
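/* After this adjustment a right shift by s is represented as shift == -s
 * (e.g. an 8-bit VSHR by 3 arrives here as 8 - 3 = 5 and becomes -3);
 * the variable-shift helpers used below interpret a negative shift count
 * as a right shift.
 */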
  5354. if (size == 3) {
  5355. count = q + 1;
  5356. } else {
  5357. count = q ? 4: 2;
  5358. }
  5359. switch (size) {
  5360. case 0:
  5361. imm = (uint8_t) shift;
  5362. imm |= imm << 8;
  5363. imm |= imm << 16;
  5364. break;
  5365. case 1:
  5366. imm = (uint16_t) shift;
  5367. imm |= imm << 16;
  5368. break;
  5369. case 2:
  5370. case 3:
  5371. imm = shift;
  5372. break;
  5373. default:
  5374. abort();
  5375. }
  5376. for (pass = 0; pass < count; pass++) {
  5377. if (size == 3) {
  5378. neon_load_reg64(cpu_V0, rm + pass);
  5379. tcg_gen_movi_i64(cpu_V1, imm);
  5380. switch (op) {
  5381. case 0: /* VSHR */
  5382. case 1: /* VSRA */
  5383. if (u)
  5384. gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
  5385. else
  5386. gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
  5387. break;
  5388. case 2: /* VRSHR */
  5389. case 3: /* VRSRA */
  5390. if (u)
  5391. gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
  5392. else
  5393. gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
  5394. break;
  5395. case 4: /* VSRI */
  5396. case 5: /* VSHL, VSLI */
  5397. gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
  5398. break;
  5399. case 6: /* VQSHLU */
  5400. gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
  5401. cpu_V0, cpu_V1);
  5402. break;
  5403. case 7: /* VQSHL */
  5404. if (u) {
  5405. gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
  5406. cpu_V0, cpu_V1);
  5407. } else {
  5408. gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
  5409. cpu_V0, cpu_V1);
  5410. }
  5411. break;
  5412. }
  5413. if (op == 1 || op == 3) {
  5414. /* Accumulate. */
  5415. neon_load_reg64(cpu_V1, rd + pass);
  5416. tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
  5417. } else if (op == 4 || (op == 5 && u)) {
  5418. /* Insert */
  5419. neon_load_reg64(cpu_V1, rd + pass);
  5420. uint64_t mask;
  5421. if (shift < -63 || shift > 63) {
  5422. mask = 0;
  5423. } else {
  5424. if (op == 4) {
  5425. mask = 0xffffffffffffffffull >> -shift;
  5426. } else {
  5427. mask = 0xffffffffffffffffull << shift;
  5428. }
  5429. }
  5430. tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
  5431. tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
  5432. }
  5433. neon_store_reg64(cpu_V0, rd + pass);
  5434. } else { /* size < 3 */
  5435. /* Operands in T0 and T1. */
  5436. tmp = neon_load_reg(rm, pass);
  5437. tmp2 = tcg_temp_new_i32();
  5438. tcg_gen_movi_i32(tmp2, imm);
  5439. switch (op) {
  5440. case 0: /* VSHR */
  5441. case 1: /* VSRA */
  5442. GEN_NEON_INTEGER_OP(shl);
  5443. break;
  5444. case 2: /* VRSHR */
  5445. case 3: /* VRSRA */
  5446. GEN_NEON_INTEGER_OP(rshl);
  5447. break;
  5448. case 4: /* VSRI */
  5449. case 5: /* VSHL, VSLI */
  5450. switch (size) {
  5451. case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
  5452. case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
  5453. case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
  5454. default: abort();
  5455. }
  5456. break;
  5457. case 6: /* VQSHLU */
  5458. switch (size) {
  5459. case 0:
  5460. gen_helper_neon_qshlu_s8(tmp, cpu_env,
  5461. tmp, tmp2);
  5462. break;
  5463. case 1:
  5464. gen_helper_neon_qshlu_s16(tmp, cpu_env,
  5465. tmp, tmp2);
  5466. break;
  5467. case 2:
  5468. gen_helper_neon_qshlu_s32(tmp, cpu_env,
  5469. tmp, tmp2);
  5470. break;
  5471. default:
  5472. abort();
  5473. }
  5474. break;
  5475. case 7: /* VQSHL */
  5476. GEN_NEON_INTEGER_OP_ENV(qshl);
  5477. break;
  5478. }
  5479. tcg_temp_free_i32(tmp2);
  5480. if (op == 1 || op == 3) {
  5481. /* Accumulate. */
  5482. tmp2 = neon_load_reg(rd, pass);
  5483. gen_neon_add(size, tmp, tmp2);
  5484. tcg_temp_free_i32(tmp2);
  5485. } else if (op == 4 || (op == 5 && u)) {
  5486. /* Insert */
  5487. switch (size) {
  5488. case 0:
  5489. if (op == 4)
  5490. mask = 0xff >> -shift;
  5491. else
  5492. mask = (uint8_t)(0xff << shift);
  5493. mask |= mask << 8;
  5494. mask |= mask << 16;
  5495. break;
  5496. case 1:
  5497. if (op == 4)
  5498. mask = 0xffff >> -shift;
  5499. else
  5500. mask = (uint16_t)(0xffff << shift);
  5501. mask |= mask << 16;
  5502. break;
  5503. case 2:
  5504. if (shift < -31 || shift > 31) {
  5505. mask = 0;
  5506. } else {
  5507. if (op == 4)
  5508. mask = 0xffffffffu >> -shift;
  5509. else
  5510. mask = 0xffffffffu << shift;
  5511. }
  5512. break;
  5513. default:
  5514. abort();
  5515. }
  5516. tmp2 = neon_load_reg(rd, pass);
  5517. tcg_gen_andi_i32(tmp, tmp, mask);
  5518. tcg_gen_andi_i32(tmp2, tmp2, ~mask);
  5519. tcg_gen_or_i32(tmp, tmp, tmp2);
  5520. tcg_temp_free_i32(tmp2);
  5521. }
  5522. neon_store_reg(rd, pass, tmp);
  5523. }
  5524. } /* for pass */
  5525. } else if (op < 10) {
  5526. /* Shift by immediate and narrow:
  5527. VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
  5528. int input_unsigned = (op == 8) ? !u : u;
  5529. if (rm & 1) {
  5530. return 1;
  5531. }
  5532. shift = shift - (1 << (size + 3));
  5533. size++;
  5534. if (size == 3) {
  5535. tmp64 = tcg_const_i64(shift);
  5536. neon_load_reg64(cpu_V0, rm);
  5537. neon_load_reg64(cpu_V1, rm + 1);
  5538. for (pass = 0; pass < 2; pass++) {
  5539. TCGv_i64 in;
  5540. if (pass == 0) {
  5541. in = cpu_V0;
  5542. } else {
  5543. in = cpu_V1;
  5544. }
  5545. if (q) {
  5546. if (input_unsigned) {
  5547. gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
  5548. } else {
  5549. gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
  5550. }
  5551. } else {
  5552. if (input_unsigned) {
  5553. gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
  5554. } else {
  5555. gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
  5556. }
  5557. }
  5558. tmp = tcg_temp_new_i32();
  5559. gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
  5560. neon_store_reg(rd, pass, tmp);
  5561. } /* for pass */
  5562. tcg_temp_free_i64(tmp64);
  5563. } else {
  5564. if (size == 1) {
  5565. imm = (uint16_t)shift;
  5566. imm |= imm << 16;
  5567. } else {
  5568. /* size == 2 */
  5569. imm = (uint32_t)shift;
  5570. }
  5571. tmp2 = tcg_const_i32(imm);
  5572. tmp4 = neon_load_reg(rm + 1, 0);
  5573. tmp5 = neon_load_reg(rm + 1, 1);
  5574. for (pass = 0; pass < 2; pass++) {
  5575. if (pass == 0) {
  5576. tmp = neon_load_reg(rm, 0);
  5577. } else {
  5578. tmp = tmp4;
  5579. }
  5580. gen_neon_shift_narrow(size, tmp, tmp2, q,
  5581. input_unsigned);
  5582. if (pass == 0) {
  5583. tmp3 = neon_load_reg(rm, 1);
  5584. } else {
  5585. tmp3 = tmp5;
  5586. }
  5587. gen_neon_shift_narrow(size, tmp3, tmp2, q,
  5588. input_unsigned);
  5589. tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
  5590. tcg_temp_free_i32(tmp);
  5591. tcg_temp_free_i32(tmp3);
  5592. tmp = tcg_temp_new_i32();
  5593. gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
  5594. neon_store_reg(rd, pass, tmp);
  5595. } /* for pass */
  5596. tcg_temp_free_i32(tmp2);
  5597. }
  5598. } else if (op == 10) {
  5599. /* VSHLL, VMOVL */
  5600. if (q || (rd & 1)) {
  5601. return 1;
  5602. }
  5603. tmp = neon_load_reg(rm, 0);
  5604. tmp2 = neon_load_reg(rm, 1);
  5605. for (pass = 0; pass < 2; pass++) {
  5606. if (pass == 1)
  5607. tmp = tmp2;
  5608. gen_neon_widen(cpu_V0, tmp, size, u);
  5609. if (shift != 0) {
  5610. /* The shift is less than the width of the source
  5611. type, so we can just shift the whole register. */
  5612. tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5613. /* Widen the result of the shift: we need to clear
5614. * the potential overflow bits resulting from
5615. * left bits of the narrow input appearing as
5616. * right bits of the left neighbouring narrow
5617. * input. */
  5618. if (size < 2 || !u) {
  5619. uint64_t imm64;
  5620. if (size == 0) {
  5621. imm = (0xffu >> (8 - shift));
  5622. imm |= imm << 16;
  5623. } else if (size == 1) {
  5624. imm = 0xffff >> (16 - shift);
  5625. } else {
  5626. /* size == 2 */
  5627. imm = 0xffffffff >> (32 - shift);
  5628. }
  5629. if (size < 2) {
  5630. imm64 = imm | (((uint64_t)imm) << 32);
  5631. } else {
  5632. imm64 = imm;
  5633. }
  5634. tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
  5635. }
  5636. }
  5637. neon_store_reg64(cpu_V0, rd + pass);
  5638. }
  5639. } else if (op >= 14) {
  5640. /* VCVT fixed-point. */
  5641. if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
  5642. return 1;
  5643. }
  5644. /* We have already masked out the must-be-1 top bit of imm6,
  5645. * hence this 32-shift where the ARM ARM has 64-imm6.
  5646. */
  5647. shift = 32 - shift;
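/* i.e. the number of fractional bits is 64 - imm6; with the must-be-1 top
 * bit of imm6 already stripped off, 32 - shift gives the same value, e.g.
 * imm6 = 60 encodes 4 fractional bits and arrives here as shift = 28.
 */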
  5648. for (pass = 0; pass < (q ? 4 : 2); pass++) {
  5649. tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
  5650. if (!(op & 1)) {
  5651. if (u)
  5652. gen_vfp_ulto(0, shift, 1);
  5653. else
  5654. gen_vfp_slto(0, shift, 1);
  5655. } else {
  5656. if (u)
  5657. gen_vfp_toul(0, shift, 1);
  5658. else
  5659. gen_vfp_tosl(0, shift, 1);
  5660. }
  5661. tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
  5662. }
  5663. } else {
  5664. return 1;
  5665. }
  5666. } else { /* (insn & 0x00380080) == 0 */
  5667. int invert;
  5668. if (q && (rd & 1)) {
  5669. return 1;
  5670. }
  5671. op = (insn >> 8) & 0xf;
  5672. /* One register and immediate. */
  5673. imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
  5674. invert = (insn & (1 << 5)) != 0;
  5675. /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
  5676. * We choose to not special-case this and will behave as if a
  5677. * valid constant encoding of 0 had been given.
  5678. */
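/* Expand the 8-bit immediate into the 32-bit per-element constant
 * selected by the cmode field (held in 'op'): shifted-byte forms,
 * replicated halfword/byte forms, the "shift ones" forms and the
 * floating-point expansion.
 */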
  5679. switch (op) {
  5680. case 0: case 1:
  5681. /* no-op */
  5682. break;
  5683. case 2: case 3:
  5684. imm <<= 8;
  5685. break;
  5686. case 4: case 5:
  5687. imm <<= 16;
  5688. break;
  5689. case 6: case 7:
  5690. imm <<= 24;
  5691. break;
  5692. case 8: case 9:
  5693. imm |= imm << 16;
  5694. break;
  5695. case 10: case 11:
  5696. imm = (imm << 8) | (imm << 24);
  5697. break;
  5698. case 12:
  5699. imm = (imm << 8) | 0xff;
  5700. break;
  5701. case 13:
  5702. imm = (imm << 16) | 0xffff;
  5703. break;
  5704. case 14:
  5705. imm |= (imm << 8) | (imm << 16) | (imm << 24);
  5706. if (invert)
  5707. imm = ~imm;
  5708. break;
  5709. case 15:
  5710. if (invert) {
  5711. return 1;
  5712. }
  5713. imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
  5714. | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
  5715. break;
  5716. }
  5717. if (invert)
  5718. imm = ~imm;
  5719. for (pass = 0; pass < (q ? 4 : 2); pass++) {
  5720. if (op & 1 && op < 12) {
  5721. tmp = neon_load_reg(rd, pass);
  5722. if (invert) {
  5723. /* The immediate value has already been inverted, so
  5724. BIC becomes AND. */
  5725. tcg_gen_andi_i32(tmp, tmp, imm);
  5726. } else {
  5727. tcg_gen_ori_i32(tmp, tmp, imm);
  5728. }
  5729. } else {
  5730. /* VMOV, VMVN. */
  5731. tmp = tcg_temp_new_i32();
  5732. if (op == 14 && invert) {
  5733. int n;
  5734. uint32_t val;
  5735. val = 0;
  5736. for (n = 0; n < 4; n++) {
  5737. if (imm & (1 << (n + (pass & 1) * 4)))
  5738. val |= 0xff << (n * 8);
  5739. }
  5740. tcg_gen_movi_i32(tmp, val);
  5741. } else {
  5742. tcg_gen_movi_i32(tmp, imm);
  5743. }
  5744. }
  5745. neon_store_reg(rd, pass, tmp);
  5746. }
  5747. }
  5748. } else { /* (insn & 0x00800010 == 0x00800000) */
  5749. if (size != 3) {
  5750. op = (insn >> 8) & 0xf;
  5751. if ((insn & (1 << 6)) == 0) {
  5752. /* Three registers of different lengths. */
  5753. int src1_wide;
  5754. int src2_wide;
  5755. int prewiden;
  5756. /* undefreq: bit 0 : UNDEF if size == 0
  5757. * bit 1 : UNDEF if size == 1
  5758. * bit 2 : UNDEF if size == 2
  5759. * bit 3 : UNDEF if U == 1
  5760. * Note that [2:0] set implies 'always UNDEF'
  5761. */
  5762. int undefreq;
  5763. /* prewiden, src1_wide, src2_wide, undefreq */
  5764. static const int neon_3reg_wide[16][4] = {
  5765. {1, 0, 0, 0}, /* VADDL */
  5766. {1, 1, 0, 0}, /* VADDW */
  5767. {1, 0, 0, 0}, /* VSUBL */
  5768. {1, 1, 0, 0}, /* VSUBW */
  5769. {0, 1, 1, 0}, /* VADDHN */
  5770. {0, 0, 0, 0}, /* VABAL */
  5771. {0, 1, 1, 0}, /* VSUBHN */
  5772. {0, 0, 0, 0}, /* VABDL */
  5773. {0, 0, 0, 0}, /* VMLAL */
  5774. {0, 0, 0, 9}, /* VQDMLAL */
  5775. {0, 0, 0, 0}, /* VMLSL */
  5776. {0, 0, 0, 9}, /* VQDMLSL */
  5777. {0, 0, 0, 0}, /* Integer VMULL */
5778. {0, 0, 0, 9}, /* VQDMULL */
  5779. {0, 0, 0, 0xa}, /* Polynomial VMULL */
  5780. {0, 0, 0, 7}, /* Reserved: always UNDEF */
  5781. };
  5782. prewiden = neon_3reg_wide[op][0];
  5783. src1_wide = neon_3reg_wide[op][1];
  5784. src2_wide = neon_3reg_wide[op][2];
  5785. undefreq = neon_3reg_wide[op][3];
  5786. if ((undefreq & (1 << size)) ||
  5787. ((undefreq & 8) && u)) {
  5788. return 1;
  5789. }
  5790. if ((src1_wide && (rn & 1)) ||
  5791. (src2_wide && (rm & 1)) ||
  5792. (!src2_wide && (rd & 1))) {
  5793. return 1;
  5794. }
  5795. /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
  5796. * outside the loop below as it only performs a single pass.
  5797. */
  5798. if (op == 14 && size == 2) {
  5799. TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
  5800. if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
  5801. return 1;
  5802. }
  5803. tcg_rn = tcg_temp_new_i64();
  5804. tcg_rm = tcg_temp_new_i64();
  5805. tcg_rd = tcg_temp_new_i64();
  5806. neon_load_reg64(tcg_rn, rn);
  5807. neon_load_reg64(tcg_rm, rm);
  5808. gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
  5809. neon_store_reg64(tcg_rd, rd);
  5810. gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
  5811. neon_store_reg64(tcg_rd, rd + 1);
  5812. tcg_temp_free_i64(tcg_rn);
  5813. tcg_temp_free_i64(tcg_rm);
  5814. tcg_temp_free_i64(tcg_rd);
  5815. return 0;
  5816. }
  5817. /* Avoid overlapping operands. Wide source operands are
  5818. always aligned so will never overlap with wide
  5819. destinations in problematic ways. */
  5820. if (rd == rm && !src2_wide) {
  5821. tmp = neon_load_reg(rm, 1);
  5822. neon_store_scratch(2, tmp);
  5823. } else if (rd == rn && !src1_wide) {
  5824. tmp = neon_load_reg(rn, 1);
  5825. neon_store_scratch(2, tmp);
  5826. }
  5827. TCGV_UNUSED_I32(tmp3);
  5828. for (pass = 0; pass < 2; pass++) {
  5829. if (src1_wide) {
  5830. neon_load_reg64(cpu_V0, rn + pass);
  5831. TCGV_UNUSED_I32(tmp);
  5832. } else {
  5833. if (pass == 1 && rd == rn) {
  5834. tmp = neon_load_scratch(2);
  5835. } else {
  5836. tmp = neon_load_reg(rn, pass);
  5837. }
  5838. if (prewiden) {
  5839. gen_neon_widen(cpu_V0, tmp, size, u);
  5840. }
  5841. }
  5842. if (src2_wide) {
  5843. neon_load_reg64(cpu_V1, rm + pass);
  5844. TCGV_UNUSED_I32(tmp2);
  5845. } else {
  5846. if (pass == 1 && rd == rm) {
  5847. tmp2 = neon_load_scratch(2);
  5848. } else {
  5849. tmp2 = neon_load_reg(rm, pass);
  5850. }
  5851. if (prewiden) {
  5852. gen_neon_widen(cpu_V1, tmp2, size, u);
  5853. }
  5854. }
  5855. switch (op) {
  5856. case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
  5857. gen_neon_addl(size);
  5858. break;
  5859. case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
  5860. gen_neon_subl(size);
  5861. break;
  5862. case 5: case 7: /* VABAL, VABDL */
  5863. switch ((size << 1) | u) {
  5864. case 0:
  5865. gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
  5866. break;
  5867. case 1:
  5868. gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
  5869. break;
  5870. case 2:
  5871. gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
  5872. break;
  5873. case 3:
  5874. gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
  5875. break;
  5876. case 4:
  5877. gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
  5878. break;
  5879. case 5:
  5880. gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
  5881. break;
  5882. default: abort();
  5883. }
  5884. tcg_temp_free_i32(tmp2);
  5885. tcg_temp_free_i32(tmp);
  5886. break;
  5887. case 8: case 9: case 10: case 11: case 12: case 13:
  5888. /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
  5889. gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
  5890. break;
  5891. case 14: /* Polynomial VMULL */
  5892. gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
  5893. tcg_temp_free_i32(tmp2);
  5894. tcg_temp_free_i32(tmp);
  5895. break;
  5896. default: /* 15 is RESERVED: caught earlier */
  5897. abort();
  5898. }
  5899. if (op == 13) {
  5900. /* VQDMULL */
  5901. gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
  5902. neon_store_reg64(cpu_V0, rd + pass);
  5903. } else if (op == 5 || (op >= 8 && op <= 11)) {
  5904. /* Accumulate. */
  5905. neon_load_reg64(cpu_V1, rd + pass);
  5906. switch (op) {
  5907. case 10: /* VMLSL */
  5908. gen_neon_negl(cpu_V0, size);
  5909. /* Fall through */
  5910. case 5: case 8: /* VABAL, VMLAL */
  5911. gen_neon_addl(size);
  5912. break;
  5913. case 9: case 11: /* VQDMLAL, VQDMLSL */
  5914. gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
  5915. if (op == 11) {
  5916. gen_neon_negl(cpu_V0, size);
  5917. }
  5918. gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
  5919. break;
  5920. default:
  5921. abort();
  5922. }
  5923. neon_store_reg64(cpu_V0, rd + pass);
  5924. } else if (op == 4 || op == 6) {
  5925. /* Narrowing operation. */
  5926. tmp = tcg_temp_new_i32();
  5927. if (!u) {
  5928. switch (size) {
  5929. case 0:
  5930. gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
  5931. break;
  5932. case 1:
  5933. gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
  5934. break;
  5935. case 2:
  5936. tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
  5937. tcg_gen_extrl_i64_i32(tmp, cpu_V0);
  5938. break;
  5939. default: abort();
  5940. }
  5941. } else {
  5942. switch (size) {
  5943. case 0:
  5944. gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
  5945. break;
  5946. case 1:
  5947. gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
  5948. break;
  5949. case 2:
  5950. tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
  5951. tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
  5952. tcg_gen_extrl_i64_i32(tmp, cpu_V0);
  5953. break;
  5954. default: abort();
  5955. }
  5956. }
  5957. if (pass == 0) {
  5958. tmp3 = tmp;
  5959. } else {
  5960. neon_store_reg(rd, 0, tmp3);
  5961. neon_store_reg(rd, 1, tmp);
  5962. }
  5963. } else {
  5964. /* Write back the result. */
  5965. neon_store_reg64(cpu_V0, rd + pass);
  5966. }
  5967. }
  5968. } else {
  5969. /* Two registers and a scalar. NB that for ops of this form
  5970. * the ARM ARM labels bit 24 as Q, but it is in our variable
  5971. * 'u', not 'q'.
  5972. */
  5973. if (size == 0) {
  5974. return 1;
  5975. }
  5976. switch (op) {
  5977. case 1: /* Float VMLA scalar */
  5978. case 5: /* Floating point VMLS scalar */
  5979. case 9: /* Floating point VMUL scalar */
  5980. if (size == 1) {
  5981. return 1;
  5982. }
  5983. /* fall through */
  5984. case 0: /* Integer VMLA scalar */
  5985. case 4: /* Integer VMLS scalar */
  5986. case 8: /* Integer VMUL scalar */
  5987. case 12: /* VQDMULH scalar */
  5988. case 13: /* VQRDMULH scalar */
  5989. if (u && ((rd | rn) & 1)) {
  5990. return 1;
  5991. }
  5992. tmp = neon_get_scalar(size, rm);
  5993. neon_store_scratch(0, tmp);
  5994. for (pass = 0; pass < (u ? 4 : 2); pass++) {
  5995. tmp = neon_load_scratch(0);
  5996. tmp2 = neon_load_reg(rn, pass);
  5997. if (op == 12) {
  5998. if (size == 1) {
  5999. gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
  6000. } else {
  6001. gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
  6002. }
  6003. } else if (op == 13) {
  6004. if (size == 1) {
  6005. gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
  6006. } else {
  6007. gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
  6008. }
  6009. } else if (op & 1) {
  6010. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6011. gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
  6012. tcg_temp_free_ptr(fpstatus);
  6013. } else {
  6014. switch (size) {
  6015. case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
  6016. case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
  6017. case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
  6018. default: abort();
  6019. }
  6020. }
  6021. tcg_temp_free_i32(tmp2);
  6022. if (op < 8) {
  6023. /* Accumulate. */
  6024. tmp2 = neon_load_reg(rd, pass);
  6025. switch (op) {
  6026. case 0:
  6027. gen_neon_add(size, tmp, tmp2);
  6028. break;
  6029. case 1:
  6030. {
  6031. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6032. gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
  6033. tcg_temp_free_ptr(fpstatus);
  6034. break;
  6035. }
  6036. case 4:
  6037. gen_neon_rsb(size, tmp, tmp2);
  6038. break;
  6039. case 5:
  6040. {
  6041. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6042. gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
  6043. tcg_temp_free_ptr(fpstatus);
  6044. break;
  6045. }
  6046. default:
  6047. abort();
  6048. }
  6049. tcg_temp_free_i32(tmp2);
  6050. }
  6051. neon_store_reg(rd, pass, tmp);
  6052. }
  6053. break;
  6054. case 3: /* VQDMLAL scalar */
  6055. case 7: /* VQDMLSL scalar */
  6056. case 11: /* VQDMULL scalar */
  6057. if (u == 1) {
  6058. return 1;
  6059. }
  6060. /* fall through */
6061. case 2: /* VMLAL scalar */
  6062. case 6: /* VMLSL scalar */
  6063. case 10: /* VMULL scalar */
  6064. if (rd & 1) {
  6065. return 1;
  6066. }
  6067. tmp2 = neon_get_scalar(size, rm);
  6068. /* We need a copy of tmp2 because gen_neon_mull
  6069. * deletes it during pass 0. */
  6070. tmp4 = tcg_temp_new_i32();
  6071. tcg_gen_mov_i32(tmp4, tmp2);
  6072. tmp3 = neon_load_reg(rn, 1);
  6073. for (pass = 0; pass < 2; pass++) {
  6074. if (pass == 0) {
  6075. tmp = neon_load_reg(rn, 0);
  6076. } else {
  6077. tmp = tmp3;
  6078. tmp2 = tmp4;
  6079. }
  6080. gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
  6081. if (op != 11) {
  6082. neon_load_reg64(cpu_V1, rd + pass);
  6083. }
  6084. switch (op) {
  6085. case 6:
  6086. gen_neon_negl(cpu_V0, size);
  6087. /* Fall through */
  6088. case 2:
  6089. gen_neon_addl(size);
  6090. break;
  6091. case 3: case 7:
  6092. gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
  6093. if (op == 7) {
  6094. gen_neon_negl(cpu_V0, size);
  6095. }
  6096. gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
  6097. break;
  6098. case 10:
  6099. /* no-op */
  6100. break;
  6101. case 11:
  6102. gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
  6103. break;
  6104. default:
  6105. abort();
  6106. }
  6107. neon_store_reg64(cpu_V0, rd + pass);
  6108. }
  6109. break;
  6110. default: /* 14 and 15 are RESERVED */
  6111. return 1;
  6112. }
  6113. }
  6114. } else { /* size == 3 */
  6115. if (!u) {
  6116. /* Extract. */
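/* VEXT: the result is bytes imm..imm+7 (imm..imm+15 for Q) of the
 * concatenation <Vm:Vn>, i.e. Vn supplies the low bytes starting at
 * byte offset imm and Vm supplies the remainder.
 */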
  6117. imm = (insn >> 8) & 0xf;
  6118. if (imm > 7 && !q)
  6119. return 1;
  6120. if (q && ((rd | rn | rm) & 1)) {
  6121. return 1;
  6122. }
  6123. if (imm == 0) {
  6124. neon_load_reg64(cpu_V0, rn);
  6125. if (q) {
  6126. neon_load_reg64(cpu_V1, rn + 1);
  6127. }
  6128. } else if (imm == 8) {
  6129. neon_load_reg64(cpu_V0, rn + 1);
  6130. if (q) {
  6131. neon_load_reg64(cpu_V1, rm);
  6132. }
  6133. } else if (q) {
  6134. tmp64 = tcg_temp_new_i64();
  6135. if (imm < 8) {
  6136. neon_load_reg64(cpu_V0, rn);
  6137. neon_load_reg64(tmp64, rn + 1);
  6138. } else {
  6139. neon_load_reg64(cpu_V0, rn + 1);
  6140. neon_load_reg64(tmp64, rm);
  6141. }
  6142. tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
  6143. tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
  6144. tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
  6145. if (imm < 8) {
  6146. neon_load_reg64(cpu_V1, rm);
  6147. } else {
  6148. neon_load_reg64(cpu_V1, rm + 1);
  6149. imm -= 8;
  6150. }
  6151. tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
  6152. tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
  6153. tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
  6154. tcg_temp_free_i64(tmp64);
  6155. } else {
  6156. /* BUGFIX */
  6157. neon_load_reg64(cpu_V0, rn);
  6158. tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
  6159. neon_load_reg64(cpu_V1, rm);
  6160. tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
  6161. tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
  6162. }
  6163. neon_store_reg64(cpu_V0, rd);
  6164. if (q) {
  6165. neon_store_reg64(cpu_V1, rd + 1);
  6166. }
  6167. } else if ((insn & (1 << 11)) == 0) {
  6168. /* Two register misc. */
  6169. op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
  6170. size = (insn >> 18) & 3;
  6171. /* UNDEF for unknown op values and bad op-size combinations */
  6172. if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
  6173. return 1;
  6174. }
  6175. if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
  6176. q && ((rm | rd) & 1)) {
  6177. return 1;
  6178. }
  6179. switch (op) {
  6180. case NEON_2RM_VREV64:
  6181. for (pass = 0; pass < (q ? 2 : 1); pass++) {
  6182. tmp = neon_load_reg(rm, pass * 2);
  6183. tmp2 = neon_load_reg(rm, pass * 2 + 1);
  6184. switch (size) {
  6185. case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
  6186. case 1: gen_swap_half(tmp); break;
  6187. case 2: /* no-op */ break;
  6188. default: abort();
  6189. }
  6190. neon_store_reg(rd, pass * 2 + 1, tmp);
  6191. if (size == 2) {
  6192. neon_store_reg(rd, pass * 2, tmp2);
  6193. } else {
  6194. switch (size) {
  6195. case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
  6196. case 1: gen_swap_half(tmp2); break;
  6197. default: abort();
  6198. }
  6199. neon_store_reg(rd, pass * 2, tmp2);
  6200. }
  6201. }
  6202. break;
  6203. case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
  6204. case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
  6205. for (pass = 0; pass < q + 1; pass++) {
  6206. tmp = neon_load_reg(rm, pass * 2);
  6207. gen_neon_widen(cpu_V0, tmp, size, op & 1);
  6208. tmp = neon_load_reg(rm, pass * 2 + 1);
  6209. gen_neon_widen(cpu_V1, tmp, size, op & 1);
  6210. switch (size) {
  6211. case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
  6212. case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
  6213. case 2: tcg_gen_add_i64(CPU_V001); break;
  6214. default: abort();
  6215. }
  6216. if (op >= NEON_2RM_VPADAL) {
  6217. /* Accumulate. */
  6218. neon_load_reg64(cpu_V1, rd + pass);
  6219. gen_neon_addl(size);
  6220. }
  6221. neon_store_reg64(cpu_V0, rd + pass);
  6222. }
  6223. break;
  6224. case NEON_2RM_VTRN:
  6225. if (size == 2) {
  6226. int n;
  6227. for (n = 0; n < (q ? 4 : 2); n += 2) {
  6228. tmp = neon_load_reg(rm, n);
  6229. tmp2 = neon_load_reg(rd, n + 1);
  6230. neon_store_reg(rm, n, tmp2);
  6231. neon_store_reg(rd, n + 1, tmp);
  6232. }
  6233. } else {
  6234. goto elementwise;
  6235. }
  6236. break;
  6237. case NEON_2RM_VUZP:
  6238. if (gen_neon_unzip(rd, rm, size, q)) {
  6239. return 1;
  6240. }
  6241. break;
  6242. case NEON_2RM_VZIP:
  6243. if (gen_neon_zip(rd, rm, size, q)) {
  6244. return 1;
  6245. }
  6246. break;
  6247. case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
  6248. /* also VQMOVUN; op field and mnemonics don't line up */
  6249. if (rm & 1) {
  6250. return 1;
  6251. }
  6252. TCGV_UNUSED_I32(tmp2);
  6253. for (pass = 0; pass < 2; pass++) {
  6254. neon_load_reg64(cpu_V0, rm + pass);
  6255. tmp = tcg_temp_new_i32();
  6256. gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
  6257. tmp, cpu_V0);
  6258. if (pass == 0) {
  6259. tmp2 = tmp;
  6260. } else {
  6261. neon_store_reg(rd, 0, tmp2);
  6262. neon_store_reg(rd, 1, tmp);
  6263. }
  6264. }
  6265. break;
  6266. case NEON_2RM_VSHLL:
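/* VSHLL with a shift amount equal to the element width (the
 * maximum-shift form has its own encoding here): widen each element
 * and then shift left by 8 << size.
 */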
  6267. if (q || (rd & 1)) {
  6268. return 1;
  6269. }
  6270. tmp = neon_load_reg(rm, 0);
  6271. tmp2 = neon_load_reg(rm, 1);
  6272. for (pass = 0; pass < 2; pass++) {
  6273. if (pass == 1)
  6274. tmp = tmp2;
  6275. gen_neon_widen(cpu_V0, tmp, size, 1);
  6276. tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
  6277. neon_store_reg64(cpu_V0, rd + pass);
  6278. }
  6279. break;
  6280. case NEON_2RM_VCVT_F16_F32:
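/* Narrowing conversion: the four single-precision elements of Qm are
 * converted to half precision and packed, two per 32-bit word, into Dd.
 */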
  6281. if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
  6282. q || (rm & 1)) {
  6283. return 1;
  6284. }
  6285. tmp = tcg_temp_new_i32();
  6286. tmp2 = tcg_temp_new_i32();
  6287. tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
  6288. gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
  6289. tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
  6290. gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
  6291. tcg_gen_shli_i32(tmp2, tmp2, 16);
  6292. tcg_gen_or_i32(tmp2, tmp2, tmp);
  6293. tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
  6294. gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
  6295. tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
  6296. neon_store_reg(rd, 0, tmp2);
  6297. tmp2 = tcg_temp_new_i32();
  6298. gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
  6299. tcg_gen_shli_i32(tmp2, tmp2, 16);
  6300. tcg_gen_or_i32(tmp2, tmp2, tmp);
  6301. neon_store_reg(rd, 1, tmp2);
  6302. tcg_temp_free_i32(tmp);
  6303. break;
  6304. case NEON_2RM_VCVT_F32_F16:
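/* Widening conversion: the four half-precision elements of Dm are
 * converted to single precision and written to Qd.
 */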
  6305. if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
  6306. q || (rd & 1)) {
  6307. return 1;
  6308. }
  6309. tmp3 = tcg_temp_new_i32();
  6310. tmp = neon_load_reg(rm, 0);
  6311. tmp2 = neon_load_reg(rm, 1);
  6312. tcg_gen_ext16u_i32(tmp3, tmp);
  6313. gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
  6314. tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
  6315. tcg_gen_shri_i32(tmp3, tmp, 16);
  6316. gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
  6317. tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
  6318. tcg_temp_free_i32(tmp);
  6319. tcg_gen_ext16u_i32(tmp3, tmp2);
  6320. gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
  6321. tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
  6322. tcg_gen_shri_i32(tmp3, tmp2, 16);
  6323. gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
  6324. tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
  6325. tcg_temp_free_i32(tmp2);
  6326. tcg_temp_free_i32(tmp3);
  6327. break;
  6328. case NEON_2RM_AESE: case NEON_2RM_AESMC:
  6329. if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
  6330. || ((rm | rd) & 1)) {
  6331. return 1;
  6332. }
  6333. tmp = tcg_const_i32(rd);
  6334. tmp2 = tcg_const_i32(rm);
  6335. /* Bit 6 is the lowest opcode bit; it distinguishes between
  6336. * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
  6337. */
  6338. tmp3 = tcg_const_i32(extract32(insn, 6, 1));
  6339. if (op == NEON_2RM_AESE) {
  6340. gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
  6341. } else {
  6342. gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
  6343. }
  6344. tcg_temp_free_i32(tmp);
  6345. tcg_temp_free_i32(tmp2);
  6346. tcg_temp_free_i32(tmp3);
  6347. break;
  6348. case NEON_2RM_SHA1H:
  6349. if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
  6350. || ((rm | rd) & 1)) {
  6351. return 1;
  6352. }
  6353. tmp = tcg_const_i32(rd);
  6354. tmp2 = tcg_const_i32(rm);
  6355. gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
  6356. tcg_temp_free_i32(tmp);
  6357. tcg_temp_free_i32(tmp2);
  6358. break;
  6359. case NEON_2RM_SHA1SU1:
  6360. if ((rm | rd) & 1) {
  6361. return 1;
  6362. }
  6363. /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
  6364. if (q) {
  6365. if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
  6366. return 1;
  6367. }
  6368. } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
  6369. return 1;
  6370. }
  6371. tmp = tcg_const_i32(rd);
  6372. tmp2 = tcg_const_i32(rm);
  6373. if (q) {
  6374. gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
  6375. } else {
  6376. gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
  6377. }
  6378. tcg_temp_free_i32(tmp);
  6379. tcg_temp_free_i32(tmp2);
  6380. break;
  6381. default:
  6382. elementwise:
  6383. for (pass = 0; pass < (q ? 4 : 2); pass++) {
  6384. if (neon_2rm_is_float_op(op)) {
  6385. tcg_gen_ld_f32(cpu_F0s, cpu_env,
  6386. neon_reg_offset(rm, pass));
  6387. TCGV_UNUSED_I32(tmp);
  6388. } else {
  6389. tmp = neon_load_reg(rm, pass);
  6390. }
  6391. switch (op) {
  6392. case NEON_2RM_VREV32:
  6393. switch (size) {
  6394. case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
  6395. case 1: gen_swap_half(tmp); break;
  6396. default: abort();
  6397. }
  6398. break;
  6399. case NEON_2RM_VREV16:
  6400. gen_rev16(tmp);
  6401. break;
  6402. case NEON_2RM_VCLS:
  6403. switch (size) {
  6404. case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
  6405. case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
  6406. case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
  6407. default: abort();
  6408. }
  6409. break;
  6410. case NEON_2RM_VCLZ:
  6411. switch (size) {
  6412. case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
  6413. case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
  6414. case 2: gen_helper_clz(tmp, tmp); break;
  6415. default: abort();
  6416. }
  6417. break;
  6418. case NEON_2RM_VCNT:
  6419. gen_helper_neon_cnt_u8(tmp, tmp);
  6420. break;
  6421. case NEON_2RM_VMVN:
  6422. tcg_gen_not_i32(tmp, tmp);
  6423. break;
  6424. case NEON_2RM_VQABS:
  6425. switch (size) {
  6426. case 0:
  6427. gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
  6428. break;
  6429. case 1:
  6430. gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
  6431. break;
  6432. case 2:
  6433. gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
  6434. break;
  6435. default: abort();
  6436. }
  6437. break;
  6438. case NEON_2RM_VQNEG:
  6439. switch (size) {
  6440. case 0:
  6441. gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
  6442. break;
  6443. case 1:
  6444. gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
  6445. break;
  6446. case 2:
  6447. gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
  6448. break;
  6449. default: abort();
  6450. }
  6451. break;
  6452. case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
  6453. tmp2 = tcg_const_i32(0);
  6454. switch(size) {
  6455. case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
  6456. case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
  6457. case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
  6458. default: abort();
  6459. }
  6460. tcg_temp_free_i32(tmp2);
  6461. if (op == NEON_2RM_VCLE0) {
  6462. tcg_gen_not_i32(tmp, tmp);
  6463. }
  6464. break;
  6465. case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
  6466. tmp2 = tcg_const_i32(0);
  6467. switch(size) {
  6468. case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
  6469. case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
  6470. case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
  6471. default: abort();
  6472. }
  6473. tcg_temp_free_i32(tmp2);
  6474. if (op == NEON_2RM_VCLT0) {
  6475. tcg_gen_not_i32(tmp, tmp);
  6476. }
  6477. break;
  6478. case NEON_2RM_VCEQ0:
  6479. tmp2 = tcg_const_i32(0);
  6480. switch(size) {
  6481. case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
  6482. case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
  6483. case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
  6484. default: abort();
  6485. }
  6486. tcg_temp_free_i32(tmp2);
  6487. break;
  6488. case NEON_2RM_VABS:
  6489. switch(size) {
  6490. case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
  6491. case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
  6492. case 2: tcg_gen_abs_i32(tmp, tmp); break;
  6493. default: abort();
  6494. }
  6495. break;
  6496. case NEON_2RM_VNEG:
  6497. tmp2 = tcg_const_i32(0);
  6498. gen_neon_rsb(size, tmp, tmp2);
  6499. tcg_temp_free_i32(tmp2);
  6500. break;
  6501. case NEON_2RM_VCGT0_F:
  6502. {
  6503. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6504. tmp2 = tcg_const_i32(0);
  6505. gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
  6506. tcg_temp_free_i32(tmp2);
  6507. tcg_temp_free_ptr(fpstatus);
  6508. break;
  6509. }
  6510. case NEON_2RM_VCGE0_F:
  6511. {
  6512. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6513. tmp2 = tcg_const_i32(0);
  6514. gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
  6515. tcg_temp_free_i32(tmp2);
  6516. tcg_temp_free_ptr(fpstatus);
  6517. break;
  6518. }
  6519. case NEON_2RM_VCEQ0_F:
  6520. {
  6521. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6522. tmp2 = tcg_const_i32(0);
  6523. gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
  6524. tcg_temp_free_i32(tmp2);
  6525. tcg_temp_free_ptr(fpstatus);
  6526. break;
  6527. }
  6528. case NEON_2RM_VCLE0_F:
  6529. {
  6530. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6531. tmp2 = tcg_const_i32(0);
  6532. gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
  6533. tcg_temp_free_i32(tmp2);
  6534. tcg_temp_free_ptr(fpstatus);
  6535. break;
  6536. }
  6537. case NEON_2RM_VCLT0_F:
  6538. {
  6539. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6540. tmp2 = tcg_const_i32(0);
  6541. gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
  6542. tcg_temp_free_i32(tmp2);
  6543. tcg_temp_free_ptr(fpstatus);
  6544. break;
  6545. }
  6546. case NEON_2RM_VABS_F:
  6547. gen_vfp_abs(0);
  6548. break;
  6549. case NEON_2RM_VNEG_F:
  6550. gen_vfp_neg(0);
  6551. break;
  6552. case NEON_2RM_VSWP:
  6553. tmp2 = neon_load_reg(rd, pass);
  6554. neon_store_reg(rm, pass, tmp2);
  6555. break;
  6556. case NEON_2RM_VTRN:
  6557. tmp2 = neon_load_reg(rd, pass);
  6558. switch (size) {
  6559. case 0: gen_neon_trn_u8(tmp, tmp2); break;
  6560. case 1: gen_neon_trn_u16(tmp, tmp2); break;
  6561. default: abort();
  6562. }
  6563. neon_store_reg(rm, pass, tmp2);
  6564. break;
  6565. case NEON_2RM_VRINTN:
  6566. case NEON_2RM_VRINTA:
  6567. case NEON_2RM_VRINTM:
  6568. case NEON_2RM_VRINTP:
  6569. case NEON_2RM_VRINTZ:
  6570. {
  6571. TCGv_i32 tcg_rmode;
  6572. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6573. int rmode;
  6574. if (op == NEON_2RM_VRINTZ) {
  6575. rmode = FPROUNDING_ZERO;
  6576. } else {
  6577. rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
  6578. }
  6579. tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
  6580. gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
  6581. cpu_env);
  6582. gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
  6583. gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
  6584. cpu_env);
  6585. tcg_temp_free_ptr(fpstatus);
  6586. tcg_temp_free_i32(tcg_rmode);
  6587. break;
  6588. }
  6589. case NEON_2RM_VRINTX:
  6590. {
  6591. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6592. gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
  6593. tcg_temp_free_ptr(fpstatus);
  6594. break;
  6595. }
  6596. case NEON_2RM_VCVTAU:
  6597. case NEON_2RM_VCVTAS:
  6598. case NEON_2RM_VCVTNU:
  6599. case NEON_2RM_VCVTNS:
  6600. case NEON_2RM_VCVTPU:
  6601. case NEON_2RM_VCVTPS:
  6602. case NEON_2RM_VCVTMU:
  6603. case NEON_2RM_VCVTMS:
  6604. {
  6605. bool is_signed = !extract32(insn, 7, 1);
  6606. TCGv_ptr fpst = get_fpstatus_ptr(1);
  6607. TCGv_i32 tcg_rmode, tcg_shift;
  6608. int rmode = fp_decode_rm[extract32(insn, 8, 2)];
  6609. tcg_shift = tcg_const_i32(0);
  6610. tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
  6611. gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
  6612. cpu_env);
  6613. if (is_signed) {
  6614. gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
  6615. tcg_shift, fpst);
  6616. } else {
  6617. gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
  6618. tcg_shift, fpst);
  6619. }
  6620. gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
  6621. cpu_env);
  6622. tcg_temp_free_i32(tcg_rmode);
  6623. tcg_temp_free_i32(tcg_shift);
  6624. tcg_temp_free_ptr(fpst);
  6625. break;
  6626. }
  6627. case NEON_2RM_VRECPE:
  6628. {
  6629. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6630. gen_helper_recpe_u32(tmp, tmp, fpstatus);
  6631. tcg_temp_free_ptr(fpstatus);
  6632. break;
  6633. }
  6634. case NEON_2RM_VRSQRTE:
  6635. {
  6636. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6637. gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
  6638. tcg_temp_free_ptr(fpstatus);
  6639. break;
  6640. }
  6641. case NEON_2RM_VRECPE_F:
  6642. {
  6643. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6644. gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
  6645. tcg_temp_free_ptr(fpstatus);
  6646. break;
  6647. }
  6648. case NEON_2RM_VRSQRTE_F:
  6649. {
  6650. TCGv_ptr fpstatus = get_fpstatus_ptr(1);
  6651. gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
  6652. tcg_temp_free_ptr(fpstatus);
  6653. break;
  6654. }
  6655. case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
  6656. gen_vfp_sito(0, 1);
  6657. break;
  6658. case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
  6659. gen_vfp_uito(0, 1);
  6660. break;
  6661. case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
  6662. gen_vfp_tosiz(0, 1);
  6663. break;
  6664. case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
  6665. gen_vfp_touiz(0, 1);
  6666. break;
  6667. default:
  6668. /* Reserved op values were caught by the
  6669. * neon_2rm_sizes[] check earlier.
  6670. */
  6671. abort();
  6672. }
  6673. if (neon_2rm_is_float_op(op)) {
  6674. tcg_gen_st_f32(cpu_F0s, cpu_env,
  6675. neon_reg_offset(rd, pass));
  6676. } else {
  6677. neon_store_reg(rd, pass, tmp);
  6678. }
  6679. }
  6680. break;
  6681. }
  6682. } else if ((insn & (1 << 10)) == 0) {
  6683. /* VTBL, VTBX. */
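/* The table length n (1..4 registers) comes from bits [9:8]; bit 6
 * selects VTBX, which leaves out-of-range result elements unchanged,
 * so the old destination value is passed to the helper (VTBL passes
 * zeroes instead).
 */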
  6684. int n = ((insn >> 8) & 3) + 1;
  6685. if ((rn + n) > 32) {
  6686. /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
  6687. * helper function running off the end of the register file.
  6688. */
  6689. return 1;
  6690. }
  6691. n <<= 3;
  6692. if (insn & (1 << 6)) {
  6693. tmp = neon_load_reg(rd, 0);
  6694. } else {
  6695. tmp = tcg_temp_new_i32();
  6696. tcg_gen_movi_i32(tmp, 0);
  6697. }
  6698. tmp2 = neon_load_reg(rm, 0);
  6699. tmp4 = tcg_const_i32(rn);
  6700. tmp5 = tcg_const_i32(n);
  6701. gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
  6702. tcg_temp_free_i32(tmp);
  6703. if (insn & (1 << 6)) {
  6704. tmp = neon_load_reg(rd, 1);
  6705. } else {
  6706. tmp = tcg_temp_new_i32();
  6707. tcg_gen_movi_i32(tmp, 0);
  6708. }
  6709. tmp3 = neon_load_reg(rm, 1);
  6710. gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
  6711. tcg_temp_free_i32(tmp5);
  6712. tcg_temp_free_i32(tmp4);
  6713. neon_store_reg(rd, 0, tmp2);
  6714. neon_store_reg(rd, 1, tmp3);
  6715. tcg_temp_free_i32(tmp);
  6716. } else if ((insn & 0x380) == 0) {
  6717. /* VDUP */
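/* VDUP (scalar): bit 19 selects the high or low word of Dm; if bit 16
 * is set the element is a byte (sub-index in bits [18:17]), otherwise
 * if bit 17 is set it is a halfword (bit 18 picks the half), otherwise
 * the whole 32-bit word is duplicated.
 */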
  6718. if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
  6719. return 1;
  6720. }
  6721. if (insn & (1 << 19)) {
  6722. tmp = neon_load_reg(rm, 1);
  6723. } else {
  6724. tmp = neon_load_reg(rm, 0);
  6725. }
  6726. if (insn & (1 << 16)) {
  6727. gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
  6728. } else if (insn & (1 << 17)) {
  6729. if ((insn >> 18) & 1)
  6730. gen_neon_dup_high16(tmp);
  6731. else
  6732. gen_neon_dup_low16(tmp);
  6733. }
  6734. for (pass = 0; pass < (q ? 4 : 2); pass++) {
  6735. tmp2 = tcg_temp_new_i32();
  6736. tcg_gen_mov_i32(tmp2, tmp);
  6737. neon_store_reg(rd, pass, tmp2);
  6738. }
  6739. tcg_temp_free_i32(tmp);
  6740. } else {
  6741. return 1;
  6742. }
  6743. }
  6744. }
  6745. return 0;
  6746. }
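/* Decode a coprocessor access instruction (MRC/MCR, MRRC/MCRR).
 * The XScale/iwMMXt coprocessor spaces are dispatched to their own
 * decoders and CDP always UNDEFs; everything else is looked up in the
 * cp_regs table and turned into a system register read or write, with
 * any required runtime access checks emitted first.
 * Returns 0 if the instruction was handled, nonzero to UNDEF.
 */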
  6747. static int disas_coproc_insn(DisasContext *s, uint32_t insn)
  6748. {
  6749. int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
  6750. const ARMCPRegInfo *ri;
  6751. cpnum = (insn >> 8) & 0xf;
  6752. /* First check for coprocessor space used for XScale/iwMMXt insns */
  6753. if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
  6754. if (extract32(s->c15_cpar, cpnum, 1) == 0) {
  6755. return 1;
  6756. }
  6757. if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
  6758. return disas_iwmmxt_insn(s, insn);
  6759. } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
  6760. return disas_dsp_insn(s, insn);
  6761. }
  6762. return 1;
  6763. }
  6764. /* Otherwise treat as a generic register access */
  6765. is64 = (insn & (1 << 25)) == 0;
  6766. if (!is64 && ((insn & (1 << 4)) == 0)) {
  6767. /* cdp */
  6768. return 1;
  6769. }
  6770. crm = insn & 0xf;
  6771. if (is64) {
  6772. crn = 0;
  6773. opc1 = (insn >> 4) & 0xf;
  6774. opc2 = 0;
  6775. rt2 = (insn >> 16) & 0xf;
  6776. } else {
  6777. crn = (insn >> 16) & 0xf;
  6778. opc1 = (insn >> 21) & 7;
  6779. opc2 = (insn >> 5) & 7;
  6780. rt2 = 0;
  6781. }
  6782. isread = (insn >> 20) & 1;
  6783. rt = (insn >> 12) & 0xf;
  6784. ri = get_arm_cp_reginfo(s->cp_regs,
  6785. ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
  6786. if (ri) {
  6787. /* Check access permissions */
  6788. if (!cp_access_ok(s->current_el, ri, isread)) {
  6789. return 1;
  6790. }
  6791. if (ri->accessfn ||
  6792. (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
  6793. /* Emit code to perform further access permissions checks at
  6794. * runtime; this may result in an exception.
6795. * Note that on XScale all cp0..cp13 registers do an access check
  6796. * call in order to handle c15_cpar.
  6797. */
  6798. TCGv_ptr tmpptr;
  6799. TCGv_i32 tcg_syn;
  6800. uint32_t syndrome;
  6801. /* Note that since we are an implementation which takes an
  6802. * exception on a trapped conditional instruction only if the
  6803. * instruction passes its condition code check, we can take
  6804. * advantage of the clause in the ARM ARM that allows us to set
  6805. * the COND field in the instruction to 0xE in all cases.
  6806. * We could fish the actual condition out of the insn (ARM)
  6807. * or the condexec bits (Thumb) but it isn't necessary.
  6808. */
  6809. switch (cpnum) {
  6810. case 14:
  6811. if (is64) {
  6812. syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
  6813. isread, s->thumb);
  6814. } else {
  6815. syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
  6816. rt, isread, s->thumb);
  6817. }
  6818. break;
  6819. case 15:
  6820. if (is64) {
  6821. syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
  6822. isread, s->thumb);
  6823. } else {
  6824. syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
  6825. rt, isread, s->thumb);
  6826. }
  6827. break;
  6828. default:
  6829. /* ARMv8 defines that only coprocessors 14 and 15 exist,
  6830. * so this can only happen if this is an ARMv7 or earlier CPU,
  6831. * in which case the syndrome information won't actually be
  6832. * guest visible.
  6833. */
  6834. assert(!arm_dc_feature(s, ARM_FEATURE_V8));
  6835. syndrome = syn_uncategorized();
  6836. break;
  6837. }
  6838. gen_set_pc_im(s, s->pc - 4);
  6839. tmpptr = tcg_const_ptr(ri);
  6840. tcg_syn = tcg_const_i32(syndrome);
  6841. gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
  6842. tcg_temp_free_ptr(tmpptr);
  6843. tcg_temp_free_i32(tcg_syn);
  6844. }
  6845. /* Handle special cases first */
  6846. switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
  6847. case ARM_CP_NOP:
  6848. return 0;
  6849. case ARM_CP_WFI:
  6850. if (isread) {
  6851. return 1;
  6852. }
  6853. gen_set_pc_im(s, s->pc);
  6854. s->is_jmp = DISAS_WFI;
  6855. return 0;
  6856. default:
  6857. break;
  6858. }
  6859. if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
  6860. gen_io_start();
  6861. }
  6862. if (isread) {
  6863. /* Read */
  6864. if (is64) {
  6865. TCGv_i64 tmp64;
  6866. TCGv_i32 tmp;
  6867. if (ri->type & ARM_CP_CONST) {
  6868. tmp64 = tcg_const_i64(ri->resetvalue);
  6869. } else if (ri->readfn) {
  6870. TCGv_ptr tmpptr;
  6871. tmp64 = tcg_temp_new_i64();
  6872. tmpptr = tcg_const_ptr(ri);
  6873. gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
  6874. tcg_temp_free_ptr(tmpptr);
  6875. } else {
  6876. tmp64 = tcg_temp_new_i64();
  6877. tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
  6878. }
  6879. tmp = tcg_temp_new_i32();
  6880. tcg_gen_extrl_i64_i32(tmp, tmp64);
  6881. store_reg(s, rt, tmp);
  6882. tcg_gen_shri_i64(tmp64, tmp64, 32);
  6883. tmp = tcg_temp_new_i32();
  6884. tcg_gen_extrl_i64_i32(tmp, tmp64);
  6885. tcg_temp_free_i64(tmp64);
  6886. store_reg(s, rt2, tmp);
  6887. } else {
  6888. TCGv_i32 tmp;
  6889. if (ri->type & ARM_CP_CONST) {
  6890. tmp = tcg_const_i32(ri->resetvalue);
  6891. } else if (ri->readfn) {
  6892. TCGv_ptr tmpptr;
  6893. tmp = tcg_temp_new_i32();
  6894. tmpptr = tcg_const_ptr(ri);
  6895. gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
  6896. tcg_temp_free_ptr(tmpptr);
  6897. } else {
  6898. tmp = load_cpu_offset(ri->fieldoffset);
  6899. }
  6900. if (rt == 15) {
  6901. /* Destination register of r15 for 32 bit loads sets
  6902. * the condition codes from the high 4 bits of the value
  6903. */
  6904. gen_set_nzcv(tmp);
  6905. tcg_temp_free_i32(tmp);
  6906. } else {
  6907. store_reg(s, rt, tmp);
  6908. }
  6909. }
  6910. } else {
  6911. /* Write */
  6912. if (ri->type & ARM_CP_CONST) {
  6913. /* If not forbidden by access permissions, treat as WI */
  6914. return 0;
  6915. }
  6916. if (is64) {
  6917. TCGv_i32 tmplo, tmphi;
  6918. TCGv_i64 tmp64 = tcg_temp_new_i64();
  6919. tmplo = load_reg(s, rt);
  6920. tmphi = load_reg(s, rt2);
  6921. tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
  6922. tcg_temp_free_i32(tmplo);
  6923. tcg_temp_free_i32(tmphi);
  6924. if (ri->writefn) {
  6925. TCGv_ptr tmpptr = tcg_const_ptr(ri);
  6926. gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
  6927. tcg_temp_free_ptr(tmpptr);
  6928. } else {
  6929. tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
  6930. }
  6931. tcg_temp_free_i64(tmp64);
  6932. } else {
  6933. if (ri->writefn) {
  6934. TCGv_i32 tmp;
  6935. TCGv_ptr tmpptr;
  6936. tmp = load_reg(s, rt);
  6937. tmpptr = tcg_const_ptr(ri);
  6938. gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
  6939. tcg_temp_free_ptr(tmpptr);
  6940. tcg_temp_free_i32(tmp);
  6941. } else {
  6942. TCGv_i32 tmp = load_reg(s, rt);
  6943. store_cpu_offset(tmp, ri->fieldoffset);
  6944. }
  6945. }
  6946. }
  6947. if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
  6948. /* I/O operations must end the TB here (whether read or write) */
  6949. gen_io_end();
  6950. gen_lookup_tb(s);
  6951. } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
  6952. /* We default to ending the TB on a coprocessor register write,
  6953. * but allow this to be suppressed by the register definition
  6954. * (usually only necessary to work around guest bugs).
  6955. */
  6956. gen_lookup_tb(s);
  6957. }
  6958. return 0;
  6959. }
  6960. /* Unknown register; this might be a guest error or a QEMU
  6961. * unimplemented feature.
  6962. */
  6963. if (is64) {
  6964. qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
  6965. "64 bit system register cp:%d opc1: %d crm:%d "
  6966. "(%s)\n",
  6967. isread ? "read" : "write", cpnum, opc1, crm,
  6968. s->ns ? "non-secure" : "secure");
  6969. } else {
  6970. qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
  6971. "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
  6972. "(%s)\n",
  6973. isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
  6974. s->ns ? "non-secure" : "secure");
  6975. }
  6976. return 1;
  6977. }
  6978. /* Store a 64-bit value to a register pair. Clobbers val. */
  6979. static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
  6980. {
  6981. TCGv_i32 tmp;
  6982. tmp = tcg_temp_new_i32();
  6983. tcg_gen_extrl_i64_i32(tmp, val);
  6984. store_reg(s, rlow, tmp);
  6985. tmp = tcg_temp_new_i32();
  6986. tcg_gen_shri_i64(val, val, 32);
  6987. tcg_gen_extrl_i64_i32(tmp, val);
  6988. store_reg(s, rhigh, tmp);
  6989. }
  6990. /* load a 32-bit value from a register and perform a 64-bit accumulate. */
  6991. static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
  6992. {
  6993. TCGv_i64 tmp;
  6994. TCGv_i32 tmp2;
  6995. /* Load value and extend to 64 bits. */
  6996. tmp = tcg_temp_new_i64();
  6997. tmp2 = load_reg(s, rlow);
  6998. tcg_gen_extu_i32_i64(tmp, tmp2);
  6999. tcg_temp_free_i32(tmp2);
  7000. tcg_gen_add_i64(val, val, tmp);
  7001. tcg_temp_free_i64(tmp);
  7002. }
  7003. /* load and add a 64-bit value from a register pair. */
  7004. static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
  7005. {
  7006. TCGv_i64 tmp;
  7007. TCGv_i32 tmpl;
  7008. TCGv_i32 tmph;
  7009. /* Load 64-bit value rd:rn. */
  7010. tmpl = load_reg(s, rlow);
  7011. tmph = load_reg(s, rhigh);
  7012. tmp = tcg_temp_new_i64();
  7013. tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
  7014. tcg_temp_free_i32(tmpl);
  7015. tcg_temp_free_i32(tmph);
  7016. tcg_gen_add_i64(val, val, tmp);
  7017. tcg_temp_free_i64(tmp);
  7018. }
  7019. /* Set N and Z flags from hi|lo. */
  7020. static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
  7021. {
  7022. tcg_gen_mov_i32(cpu_NF, hi);
  7023. tcg_gen_or_i32(cpu_ZF, lo, hi);
  7024. }
  7025. /* Load/Store exclusive instructions are implemented by remembering
  7026. the value/address loaded, and seeing if these are the same
  7027. when the store is performed. This should be sufficient to implement
  7028. the architecturally mandated semantics, and avoids having to monitor
  7029. regular stores.
  7030. In system emulation mode only one CPU will be running at once, so
  7031. this sequence is effectively atomic. In user emulation mode we
  7032. throw an exception and handle the atomic operation elsewhere. */
  7033. static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
  7034. TCGv_i32 addr, int size)
  7035. {
  7036. TCGv_i32 tmp = tcg_temp_new_i32();
  7037. s->is_ldex = true;
  7038. switch (size) {
  7039. case 0:
  7040. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  7041. break;
  7042. case 1:
  7043. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  7044. break;
  7045. case 2:
  7046. case 3:
  7047. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  7048. break;
  7049. default:
  7050. abort();
  7051. }
  7052. if (size == 3) {
  7053. TCGv_i32 tmp2 = tcg_temp_new_i32();
  7054. TCGv_i32 tmp3 = tcg_temp_new_i32();
  7055. tcg_gen_addi_i32(tmp2, addr, 4);
  7056. gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
  7057. tcg_temp_free_i32(tmp2);
  7058. tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
  7059. store_reg(s, rt2, tmp3);
  7060. } else {
  7061. tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
  7062. }
  7063. store_reg(s, rt, tmp);
  7064. tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
  7065. }
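/* CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address.
 */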
  7066. static void gen_clrex(DisasContext *s)
  7067. {
  7068. tcg_gen_movi_i64(cpu_exclusive_addr, -1);
  7069. }
  7070. #ifdef CONFIG_USER_ONLY
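/* In user-mode emulation the store-exclusive is not done inline: we
 * record the address and the operand registers and raise EXCP_STREX so
 * the operation can be completed atomically outside generated code.
 */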
  7071. static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
  7072. TCGv_i32 addr, int size)
  7073. {
  7074. tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
  7075. tcg_gen_movi_i32(cpu_exclusive_info,
  7076. size | (rd << 4) | (rt << 8) | (rt2 << 12));
  7077. gen_exception_internal_insn(s, 4, EXCP_STREX);
  7078. }
  7079. #else
  7080. static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
  7081. TCGv_i32 addr, int size)
  7082. {
  7083. TCGv_i32 tmp;
  7084. TCGv_i64 val64, extaddr;
  7085. TCGLabel *done_label;
  7086. TCGLabel *fail_label;
  7087. /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
  7088. [addr] = {Rt};
  7089. {Rd} = 0;
  7090. } else {
  7091. {Rd} = 1;
  7092. } */
  7093. fail_label = gen_new_label();
  7094. done_label = gen_new_label();
  7095. extaddr = tcg_temp_new_i64();
  7096. tcg_gen_extu_i32_i64(extaddr, addr);
  7097. tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
  7098. tcg_temp_free_i64(extaddr);
  7099. tmp = tcg_temp_new_i32();
  7100. switch (size) {
  7101. case 0:
  7102. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  7103. break;
  7104. case 1:
  7105. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  7106. break;
  7107. case 2:
  7108. case 3:
  7109. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  7110. break;
  7111. default:
  7112. abort();
  7113. }
  7114. val64 = tcg_temp_new_i64();
  7115. if (size == 3) {
  7116. TCGv_i32 tmp2 = tcg_temp_new_i32();
  7117. TCGv_i32 tmp3 = tcg_temp_new_i32();
  7118. tcg_gen_addi_i32(tmp2, addr, 4);
  7119. gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
  7120. tcg_temp_free_i32(tmp2);
  7121. tcg_gen_concat_i32_i64(val64, tmp, tmp3);
  7122. tcg_temp_free_i32(tmp3);
  7123. } else {
  7124. tcg_gen_extu_i32_i64(val64, tmp);
  7125. }
  7126. tcg_temp_free_i32(tmp);
  7127. tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
  7128. tcg_temp_free_i64(val64);
  7129. tmp = load_reg(s, rt);
  7130. switch (size) {
  7131. case 0:
  7132. gen_aa32_st8(tmp, addr, get_mem_index(s));
  7133. break;
  7134. case 1:
  7135. gen_aa32_st16(tmp, addr, get_mem_index(s));
  7136. break;
  7137. case 2:
  7138. case 3:
  7139. gen_aa32_st32(tmp, addr, get_mem_index(s));
  7140. break;
  7141. default:
  7142. abort();
  7143. }
  7144. tcg_temp_free_i32(tmp);
  7145. if (size == 3) {
  7146. tcg_gen_addi_i32(addr, addr, 4);
  7147. tmp = load_reg(s, rt2);
  7148. gen_aa32_st32(tmp, addr, get_mem_index(s));
  7149. tcg_temp_free_i32(tmp);
  7150. }
  7151. tcg_gen_movi_i32(cpu_R[rd], 0);
  7152. tcg_gen_br(done_label);
  7153. gen_set_label(fail_label);
  7154. tcg_gen_movi_i32(cpu_R[rd], 1);
  7155. gen_set_label(done_label);
  7156. tcg_gen_movi_i64(cpu_exclusive_addr, -1);
  7157. }
  7158. #endif
  7159. /* gen_srs:
  7160. * @env: CPUARMState
  7161. * @s: DisasContext
  7162. * @mode: mode field from insn (which stack to store to)
  7163. * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
  7164. * @writeback: true if writeback bit set
  7165. *
  7166. * Generate code for the SRS (Store Return State) insn.
  7167. */
  7168. static void gen_srs(DisasContext *s,
  7169. uint32_t mode, uint32_t amode, bool writeback)
  7170. {
  7171. int32_t offset;
  7172. TCGv_i32 addr = tcg_temp_new_i32();
  7173. TCGv_i32 tmp = tcg_const_i32(mode);
  7174. gen_helper_get_r13_banked(addr, cpu_env, tmp);
  7175. tcg_temp_free_i32(tmp);
  7176. switch (amode) {
  7177. case 0: /* DA */
  7178. offset = -4;
  7179. break;
  7180. case 1: /* IA */
  7181. offset = 0;
  7182. break;
  7183. case 2: /* DB */
  7184. offset = -8;
  7185. break;
  7186. case 3: /* IB */
  7187. offset = 4;
  7188. break;
  7189. default:
  7190. abort();
  7191. }
  7192. tcg_gen_addi_i32(addr, addr, offset);
  7193. tmp = load_reg(s, 14);
  7194. gen_aa32_st32(tmp, addr, get_mem_index(s));
  7195. tcg_temp_free_i32(tmp);
  7196. tmp = load_cpu_field(spsr);
  7197. tcg_gen_addi_i32(addr, addr, 4);
  7198. gen_aa32_st32(tmp, addr, get_mem_index(s));
  7199. tcg_temp_free_i32(tmp);
  7200. if (writeback) {
  7201. switch (amode) {
  7202. case 0:
  7203. offset = -8;
  7204. break;
  7205. case 1:
  7206. offset = 4;
  7207. break;
  7208. case 2:
  7209. offset = -4;
  7210. break;
  7211. case 3:
  7212. offset = 0;
  7213. break;
  7214. default:
  7215. abort();
  7216. }
  7217. tcg_gen_addi_i32(addr, addr, offset);
  7218. tmp = tcg_const_i32(mode);
  7219. gen_helper_set_r13_banked(cpu_env, tmp, addr);
  7220. tcg_temp_free_i32(tmp);
  7221. }
  7222. tcg_temp_free_i32(addr);
  7223. }
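/* Decode and translate a single 32-bit ARM (A32) instruction. The
 * unconditional (cond == 0xF) space is handled first; for conditional
 * instructions we emit a branch to s->condlabel that skips the
 * generated code when the condition fails.
 */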
  7224. static void disas_arm_insn(DisasContext *s, unsigned int insn)
  7225. {
  7226. unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
  7227. TCGv_i32 tmp;
  7228. TCGv_i32 tmp2;
  7229. TCGv_i32 tmp3;
  7230. TCGv_i32 addr;
  7231. TCGv_i64 tmp64;
  7232. /* M variants do not implement ARM mode. */
  7233. if (arm_dc_feature(s, ARM_FEATURE_M)) {
  7234. goto illegal_op;
  7235. }
  7236. cond = insn >> 28;
7237. if (cond == 0xf) {
  7238. /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
  7239. * choose to UNDEF. In ARMv5 and above the space is used
  7240. * for miscellaneous unconditional instructions.
  7241. */
  7242. ARCH(5);
  7243. /* Unconditional instructions. */
  7244. if (((insn >> 25) & 7) == 1) {
  7245. /* NEON Data processing. */
  7246. if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
  7247. goto illegal_op;
  7248. }
  7249. if (disas_neon_data_insn(s, insn)) {
  7250. goto illegal_op;
  7251. }
  7252. return;
  7253. }
  7254. if ((insn & 0x0f100000) == 0x04000000) {
  7255. /* NEON load/store. */
  7256. if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
  7257. goto illegal_op;
  7258. }
  7259. if (disas_neon_ls_insn(s, insn)) {
  7260. goto illegal_op;
  7261. }
  7262. return;
  7263. }
  7264. if ((insn & 0x0f000e10) == 0x0e000a00) {
  7265. /* VFP. */
  7266. if (disas_vfp_insn(s, insn)) {
  7267. goto illegal_op;
  7268. }
  7269. return;
  7270. }
  7271. if (((insn & 0x0f30f000) == 0x0510f000) ||
  7272. ((insn & 0x0f30f010) == 0x0710f000)) {
  7273. if ((insn & (1 << 22)) == 0) {
  7274. /* PLDW; v7MP */
  7275. if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
  7276. goto illegal_op;
  7277. }
  7278. }
  7279. /* Otherwise PLD; v5TE+ */
  7280. ARCH(5TE);
  7281. return;
  7282. }
  7283. if (((insn & 0x0f70f000) == 0x0450f000) ||
  7284. ((insn & 0x0f70f010) == 0x0650f000)) {
  7285. ARCH(7);
  7286. return; /* PLI; V7 */
  7287. }
  7288. if (((insn & 0x0f700000) == 0x04100000) ||
  7289. ((insn & 0x0f700010) == 0x06100000)) {
  7290. if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
  7291. goto illegal_op;
  7292. }
  7293. return; /* v7MP: Unallocated memory hint: must NOP */
  7294. }
  7295. if ((insn & 0x0ffffdff) == 0x01010000) {
  7296. ARCH(6);
  7297. /* setend */
  7298. if (((insn >> 9) & 1) != s->bswap_code) {
  7299. /* Dynamic endianness switching not implemented. */
  7300. qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
  7301. goto illegal_op;
  7302. }
  7303. return;
  7304. } else if ((insn & 0x0fffff00) == 0x057ff000) {
  7305. switch ((insn >> 4) & 0xf) {
  7306. case 1: /* clrex */
  7307. ARCH(6K);
  7308. gen_clrex(s);
  7309. return;
  7310. case 4: /* dsb */
  7311. case 5: /* dmb */
  7312. case 6: /* isb */
  7313. ARCH(7);
  7314. /* We don't emulate caches so these are a no-op. */
  7315. return;
  7316. default:
  7317. goto illegal_op;
  7318. }
  7319. } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
  7320. /* srs */
  7321. if (IS_USER(s)) {
  7322. goto illegal_op;
  7323. }
  7324. ARCH(6);
  7325. gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
  7326. return;
  7327. } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
  7328. /* rfe */
  7329. int32_t offset;
  7330. if (IS_USER(s))
  7331. goto illegal_op;
  7332. ARCH(6);
  7333. rn = (insn >> 16) & 0xf;
  7334. addr = load_reg(s, rn);
  7335. i = (insn >> 23) & 3;
  7336. switch (i) {
  7337. case 0: offset = -4; break; /* DA */
  7338. case 1: offset = 0; break; /* IA */
  7339. case 2: offset = -8; break; /* DB */
  7340. case 3: offset = 4; break; /* IB */
  7341. default: abort();
  7342. }
  7343. if (offset)
  7344. tcg_gen_addi_i32(addr, addr, offset);
  7345. /* Load PC into tmp and CPSR into tmp2. */
  7346. tmp = tcg_temp_new_i32();
  7347. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  7348. tcg_gen_addi_i32(addr, addr, 4);
  7349. tmp2 = tcg_temp_new_i32();
  7350. gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
  7351. if (insn & (1 << 21)) {
  7352. /* Base writeback. */
  7353. switch (i) {
  7354. case 0: offset = -8; break;
  7355. case 1: offset = 4; break;
  7356. case 2: offset = -4; break;
  7357. case 3: offset = 0; break;
  7358. default: abort();
  7359. }
  7360. if (offset)
  7361. tcg_gen_addi_i32(addr, addr, offset);
  7362. store_reg(s, rn, addr);
  7363. } else {
  7364. tcg_temp_free_i32(addr);
  7365. }
  7366. gen_rfe(s, tmp, tmp2);
  7367. return;
  7368. } else if ((insn & 0x0e000000) == 0x0a000000) {
  7369. /* branch link and change to thumb (blx <offset>) */
  7370. int32_t offset;
  7371. val = (uint32_t)s->pc;
  7372. tmp = tcg_temp_new_i32();
  7373. tcg_gen_movi_i32(tmp, val);
  7374. store_reg(s, 14, tmp);
  7375. /* Sign-extend the 24-bit offset */
  7376. offset = (((int32_t)insn) << 8) >> 8;
  7377. /* offset * 4 + bit24 * 2 + (thumb bit) */
  7378. val += (offset << 2) | ((insn >> 23) & 2) | 1;
  7379. /* pipeline offset */
  7380. val += 4;
  7381. /* protected by ARCH(5); above, near the start of uncond block */
  7382. gen_bx_im(s, val);
  7383. return;
  7384. } else if ((insn & 0x0e000f00) == 0x0c000100) {
  7385. if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
  7386. /* iWMMXt register transfer. */
  7387. if (extract32(s->c15_cpar, 1, 1)) {
  7388. if (!disas_iwmmxt_insn(s, insn)) {
  7389. return;
  7390. }
  7391. }
  7392. }
  7393. } else if ((insn & 0x0fe00000) == 0x0c400000) {
  7394. /* Coprocessor double register transfer. */
  7395. ARCH(5TE);
  7396. } else if ((insn & 0x0f000010) == 0x0e000010) {
  7397. /* Additional coprocessor register transfer. */
  7398. } else if ((insn & 0x0ff10020) == 0x01000000) {
  7399. uint32_t mask;
  7400. uint32_t val;
  7401. /* cps (privileged) */
  7402. if (IS_USER(s))
  7403. return;
  7404. mask = val = 0;
  7405. if (insn & (1 << 19)) {
  7406. if (insn & (1 << 8))
  7407. mask |= CPSR_A;
  7408. if (insn & (1 << 7))
  7409. mask |= CPSR_I;
  7410. if (insn & (1 << 6))
  7411. mask |= CPSR_F;
  7412. if (insn & (1 << 18))
  7413. val |= mask;
  7414. }
  7415. if (insn & (1 << 17)) {
  7416. mask |= CPSR_M;
  7417. val |= (insn & 0x1f);
  7418. }
  7419. if (mask) {
  7420. gen_set_psr_im(s, mask, 0, val);
  7421. }
  7422. return;
  7423. }
  7424. goto illegal_op;
  7425. }
  7426. if (cond != 0xe) {
  7427. /* if not always execute, we generate a conditional jump to
  7428. next instruction */
  7429. s->condlabel = gen_new_label();
  7430. arm_gen_test_cc(cond ^ 1, s->condlabel);
  7431. s->condjmp = 1;
  7432. }
  7433. if ((insn & 0x0f900000) == 0x03000000) {
  7434. if ((insn & (1 << 21)) == 0) {
  7435. ARCH(6T2);
  7436. rd = (insn >> 12) & 0xf;
  7437. val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
  7438. if ((insn & (1 << 22)) == 0) {
  7439. /* MOVW */
  7440. tmp = tcg_temp_new_i32();
  7441. tcg_gen_movi_i32(tmp, val);
  7442. } else {
  7443. /* MOVT */
  7444. tmp = load_reg(s, rd);
  7445. tcg_gen_ext16u_i32(tmp, tmp);
  7446. tcg_gen_ori_i32(tmp, tmp, val << 16);
  7447. }
  7448. store_reg(s, rd, tmp);
  7449. } else {
  7450. if (((insn >> 12) & 0xf) != 0xf)
  7451. goto illegal_op;
  7452. if (((insn >> 16) & 0xf) == 0) {
  7453. gen_nop_hint(s, insn & 0xff);
  7454. } else {
  7455. /* CPSR = immediate */
  7456. val = insn & 0xff;
  7457. shift = ((insn >> 8) & 0xf) * 2;
  7458. if (shift)
  7459. val = (val >> shift) | (val << (32 - shift));
  7460. i = ((insn & (1 << 22)) != 0);
  7461. if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
  7462. i, val)) {
  7463. goto illegal_op;
  7464. }
  7465. }
  7466. }
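/* Sketch, not part of the translator: the A32 "modified immediate" expanded
 * above (and again in the data-processing path below) is an 8-bit value
 * rotated right by twice the 4-bit rotate field.  Helper name hypothetical. */
#if 0
#include <stdint.h>

static uint32_t a32_expand_imm_example(uint32_t insn)
{
    uint32_t val = insn & 0xff;               /* imm8 */
    uint32_t rot = ((insn >> 8) & 0xf) * 2;   /* rotate amount, 0..30 */
    return rot ? (val >> rot) | (val << (32 - rot)) : val;
}
#endif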
  7467. } else if ((insn & 0x0f900000) == 0x01000000
  7468. && (insn & 0x00000090) != 0x00000090) {
  7469. /* miscellaneous instructions */
  7470. op1 = (insn >> 21) & 3;
  7471. sh = (insn >> 4) & 0xf;
  7472. rm = insn & 0xf;
  7473. switch (sh) {
  7474. case 0x0: /* move program status register */
  7475. if (op1 & 1) {
  7476. /* PSR = reg */
  7477. tmp = load_reg(s, rm);
  7478. i = ((op1 & 2) != 0);
  7479. if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
  7480. goto illegal_op;
  7481. } else {
  7482. /* reg = PSR */
  7483. rd = (insn >> 12) & 0xf;
  7484. if (op1 & 2) {
  7485. if (IS_USER(s))
  7486. goto illegal_op;
  7487. tmp = load_cpu_field(spsr);
  7488. } else {
  7489. tmp = tcg_temp_new_i32();
  7490. gen_helper_cpsr_read(tmp, cpu_env);
  7491. }
  7492. store_reg(s, rd, tmp);
  7493. }
  7494. break;
  7495. case 0x1:
  7496. if (op1 == 1) {
  7497. /* branch/exchange thumb (bx). */
  7498. ARCH(4T);
  7499. tmp = load_reg(s, rm);
  7500. gen_bx(s, tmp);
  7501. } else if (op1 == 3) {
  7502. /* clz */
  7503. ARCH(5);
  7504. rd = (insn >> 12) & 0xf;
  7505. tmp = load_reg(s, rm);
  7506. gen_helper_clz(tmp, tmp);
  7507. store_reg(s, rd, tmp);
  7508. } else {
  7509. goto illegal_op;
  7510. }
  7511. break;
  7512. case 0x2:
  7513. if (op1 == 1) {
  7514. ARCH(5J); /* bxj */
  7515. /* Trivial implementation equivalent to bx. */
  7516. tmp = load_reg(s, rm);
  7517. gen_bx(s, tmp);
  7518. } else {
  7519. goto illegal_op;
  7520. }
  7521. break;
  7522. case 0x3:
  7523. if (op1 != 1)
  7524. goto illegal_op;
  7525. ARCH(5);
  7526. /* branch link/exchange thumb (blx) */
  7527. tmp = load_reg(s, rm);
  7528. tmp2 = tcg_temp_new_i32();
  7529. tcg_gen_movi_i32(tmp2, s->pc);
  7530. store_reg(s, 14, tmp2);
  7531. gen_bx(s, tmp);
  7532. break;
  7533. case 0x4:
  7534. {
  7535. /* crc32/crc32c */
  7536. uint32_t c = extract32(insn, 8, 4);
7537. /* Check that this CPU supports ARMv8 CRC instructions.
7538. * op1 == 3 is UNPREDICTABLE, but we handle it as UNDEFINED.
  7539. * Bits 8, 10 and 11 should be zero.
  7540. */
  7541. if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
  7542. (c & 0xd) != 0) {
  7543. goto illegal_op;
  7544. }
  7545. rn = extract32(insn, 16, 4);
  7546. rd = extract32(insn, 12, 4);
  7547. tmp = load_reg(s, rn);
  7548. tmp2 = load_reg(s, rm);
  7549. if (op1 == 0) {
  7550. tcg_gen_andi_i32(tmp2, tmp2, 0xff);
  7551. } else if (op1 == 1) {
  7552. tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
  7553. }
  7554. tmp3 = tcg_const_i32(1 << op1);
  7555. if (c & 0x2) {
  7556. gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
  7557. } else {
  7558. gen_helper_crc32(tmp, tmp, tmp2, tmp3);
  7559. }
  7560. tcg_temp_free_i32(tmp2);
  7561. tcg_temp_free_i32(tmp3);
  7562. store_reg(s, rd, tmp);
  7563. break;
  7564. }
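/* Sketch, for orientation only: the helpers called above compute CRC-32
 * (Ethernet polynomial) or CRC-32C (Castagnoli polynomial) over 1, 2 or 4
 * input bytes as selected by the "1 << op1" size operand, after the value
 * has been masked to that width.  A minimal bit-at-a-time reflected CRC-32
 * update over one byte looks like this (hypothetical helper, not the code
 * QEMU actually uses): */
#if 0
#include <stdint.h>

static uint32_t crc32_byte_example(uint32_t crc, uint8_t byte)
{
    crc ^= byte;
    for (int i = 0; i < 8; i++) {
        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);  /* reflected poly */
    }
    return crc;
}
#endif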
  7565. case 0x5: /* saturating add/subtract */
  7566. ARCH(5TE);
  7567. rd = (insn >> 12) & 0xf;
  7568. rn = (insn >> 16) & 0xf;
  7569. tmp = load_reg(s, rm);
  7570. tmp2 = load_reg(s, rn);
  7571. if (op1 & 2)
  7572. gen_helper_double_saturate(tmp2, cpu_env, tmp2);
  7573. if (op1 & 1)
  7574. gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
  7575. else
  7576. gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
  7577. tcg_temp_free_i32(tmp2);
  7578. store_reg(s, rd, tmp);
  7579. break;
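/* Sketch: the QADD/QSUB/QDADD/QDSUB family handled above saturates to the
 * signed 32-bit range (the QD forms first saturate 2*Rn).  The real helpers
 * also set the Q flag on saturation, which this illustrative function
 * (hypothetical name) does not model. */
#if 0
#include <stdint.h>

static int32_t qadd32_example(int32_t a, int32_t b)
{
    int64_t r = (int64_t)a + b;
    if (r > INT32_MAX) {
        r = INT32_MAX;
    } else if (r < INT32_MIN) {
        r = INT32_MIN;
    }
    return (int32_t)r;
}
#endif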
  7580. case 7:
  7581. {
  7582. int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
  7583. switch (op1) {
  7584. case 1:
  7585. /* bkpt */
  7586. ARCH(5);
  7587. gen_exception_insn(s, 4, EXCP_BKPT,
  7588. syn_aa32_bkpt(imm16, false),
  7589. default_exception_el(s));
  7590. break;
  7591. case 2:
  7592. /* Hypervisor call (v7) */
  7593. ARCH(7);
  7594. if (IS_USER(s)) {
  7595. goto illegal_op;
  7596. }
  7597. gen_hvc(s, imm16);
  7598. break;
  7599. case 3:
  7600. /* Secure monitor call (v6+) */
  7601. ARCH(6K);
  7602. if (IS_USER(s)) {
  7603. goto illegal_op;
  7604. }
  7605. gen_smc(s);
  7606. break;
  7607. default:
  7608. goto illegal_op;
  7609. }
  7610. break;
  7611. }
  7612. case 0x8: /* signed multiply */
  7613. case 0xa:
  7614. case 0xc:
  7615. case 0xe:
  7616. ARCH(5TE);
  7617. rs = (insn >> 8) & 0xf;
  7618. rn = (insn >> 12) & 0xf;
  7619. rd = (insn >> 16) & 0xf;
  7620. if (op1 == 1) {
  7621. /* (32 * 16) >> 16 */
  7622. tmp = load_reg(s, rm);
  7623. tmp2 = load_reg(s, rs);
  7624. if (sh & 4)
  7625. tcg_gen_sari_i32(tmp2, tmp2, 16);
  7626. else
  7627. gen_sxth(tmp2);
  7628. tmp64 = gen_muls_i64_i32(tmp, tmp2);
  7629. tcg_gen_shri_i64(tmp64, tmp64, 16);
  7630. tmp = tcg_temp_new_i32();
  7631. tcg_gen_extrl_i64_i32(tmp, tmp64);
  7632. tcg_temp_free_i64(tmp64);
  7633. if ((sh & 2) == 0) {
  7634. tmp2 = load_reg(s, rn);
  7635. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  7636. tcg_temp_free_i32(tmp2);
  7637. }
  7638. store_reg(s, rd, tmp);
  7639. } else {
  7640. /* 16 * 16 */
  7641. tmp = load_reg(s, rm);
  7642. tmp2 = load_reg(s, rs);
  7643. gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
  7644. tcg_temp_free_i32(tmp2);
  7645. if (op1 == 2) {
  7646. tmp64 = tcg_temp_new_i64();
  7647. tcg_gen_ext_i32_i64(tmp64, tmp);
  7648. tcg_temp_free_i32(tmp);
  7649. gen_addq(s, tmp64, rn, rd);
  7650. gen_storeq_reg(s, rn, rd, tmp64);
  7651. tcg_temp_free_i64(tmp64);
  7652. } else {
  7653. if (op1 == 0) {
  7654. tmp2 = load_reg(s, rn);
  7655. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  7656. tcg_temp_free_i32(tmp2);
  7657. }
  7658. store_reg(s, rd, tmp);
  7659. }
  7660. }
  7661. break;
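/* Sketch of the halfword multiplies decoded above (names hypothetical):
 * SMULxy multiplies two selected signed 16-bit halves; SMULWy multiplies a
 * signed 32-bit value by a signed 16-bit half and keeps bits [47:16] of the
 * product, matching the gen_muls_i64_i32 + shift-right-16 sequence. */
#if 0
#include <stdint.h>

static int32_t smulbb_example(uint32_t rm, uint32_t rs)
{
    return (int32_t)((int16_t)rm * (int16_t)rs);
}

static int32_t smulwb_example(uint32_t rm, uint32_t rs)
{
    return (int32_t)(((int64_t)(int32_t)rm * (int16_t)rs) >> 16);
}
#endif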
  7662. default:
  7663. goto illegal_op;
  7664. }
  7665. } else if (((insn & 0x0e000000) == 0 &&
  7666. (insn & 0x00000090) != 0x90) ||
  7667. ((insn & 0x0e000000) == (1 << 25))) {
  7668. int set_cc, logic_cc, shiftop;
  7669. op1 = (insn >> 21) & 0xf;
  7670. set_cc = (insn >> 20) & 1;
  7671. logic_cc = table_logic_cc[op1] & set_cc;
  7672. /* data processing instruction */
  7673. if (insn & (1 << 25)) {
  7674. /* immediate operand */
  7675. val = insn & 0xff;
  7676. shift = ((insn >> 8) & 0xf) * 2;
  7677. if (shift) {
  7678. val = (val >> shift) | (val << (32 - shift));
  7679. }
  7680. tmp2 = tcg_temp_new_i32();
  7681. tcg_gen_movi_i32(tmp2, val);
  7682. if (logic_cc && shift) {
  7683. gen_set_CF_bit31(tmp2);
  7684. }
  7685. } else {
  7686. /* register */
  7687. rm = (insn) & 0xf;
  7688. tmp2 = load_reg(s, rm);
  7689. shiftop = (insn >> 5) & 3;
  7690. if (!(insn & (1 << 4))) {
  7691. shift = (insn >> 7) & 0x1f;
  7692. gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
  7693. } else {
  7694. rs = (insn >> 8) & 0xf;
  7695. tmp = load_reg(s, rs);
  7696. gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
  7697. }
  7698. }
  7699. if (op1 != 0x0f && op1 != 0x0d) {
  7700. rn = (insn >> 16) & 0xf;
  7701. tmp = load_reg(s, rn);
  7702. } else {
  7703. TCGV_UNUSED_I32(tmp);
  7704. }
  7705. rd = (insn >> 12) & 0xf;
7706. switch (op1) {
  7707. case 0x00:
  7708. tcg_gen_and_i32(tmp, tmp, tmp2);
  7709. if (logic_cc) {
  7710. gen_logic_CC(tmp);
  7711. }
  7712. store_reg_bx(s, rd, tmp);
  7713. break;
  7714. case 0x01:
  7715. tcg_gen_xor_i32(tmp, tmp, tmp2);
  7716. if (logic_cc) {
  7717. gen_logic_CC(tmp);
  7718. }
  7719. store_reg_bx(s, rd, tmp);
  7720. break;
  7721. case 0x02:
  7722. if (set_cc && rd == 15) {
  7723. /* SUBS r15, ... is used for exception return. */
  7724. if (IS_USER(s)) {
  7725. goto illegal_op;
  7726. }
  7727. gen_sub_CC(tmp, tmp, tmp2);
  7728. gen_exception_return(s, tmp);
  7729. } else {
  7730. if (set_cc) {
  7731. gen_sub_CC(tmp, tmp, tmp2);
  7732. } else {
  7733. tcg_gen_sub_i32(tmp, tmp, tmp2);
  7734. }
  7735. store_reg_bx(s, rd, tmp);
  7736. }
  7737. break;
  7738. case 0x03:
  7739. if (set_cc) {
  7740. gen_sub_CC(tmp, tmp2, tmp);
  7741. } else {
  7742. tcg_gen_sub_i32(tmp, tmp2, tmp);
  7743. }
  7744. store_reg_bx(s, rd, tmp);
  7745. break;
  7746. case 0x04:
  7747. if (set_cc) {
  7748. gen_add_CC(tmp, tmp, tmp2);
  7749. } else {
  7750. tcg_gen_add_i32(tmp, tmp, tmp2);
  7751. }
  7752. store_reg_bx(s, rd, tmp);
  7753. break;
  7754. case 0x05:
  7755. if (set_cc) {
  7756. gen_adc_CC(tmp, tmp, tmp2);
  7757. } else {
  7758. gen_add_carry(tmp, tmp, tmp2);
  7759. }
  7760. store_reg_bx(s, rd, tmp);
  7761. break;
  7762. case 0x06:
  7763. if (set_cc) {
  7764. gen_sbc_CC(tmp, tmp, tmp2);
  7765. } else {
  7766. gen_sub_carry(tmp, tmp, tmp2);
  7767. }
  7768. store_reg_bx(s, rd, tmp);
  7769. break;
  7770. case 0x07:
  7771. if (set_cc) {
  7772. gen_sbc_CC(tmp, tmp2, tmp);
  7773. } else {
  7774. gen_sub_carry(tmp, tmp2, tmp);
  7775. }
  7776. store_reg_bx(s, rd, tmp);
  7777. break;
  7778. case 0x08:
  7779. if (set_cc) {
  7780. tcg_gen_and_i32(tmp, tmp, tmp2);
  7781. gen_logic_CC(tmp);
  7782. }
  7783. tcg_temp_free_i32(tmp);
  7784. break;
  7785. case 0x09:
  7786. if (set_cc) {
  7787. tcg_gen_xor_i32(tmp, tmp, tmp2);
  7788. gen_logic_CC(tmp);
  7789. }
  7790. tcg_temp_free_i32(tmp);
  7791. break;
  7792. case 0x0a:
  7793. if (set_cc) {
  7794. gen_sub_CC(tmp, tmp, tmp2);
  7795. }
  7796. tcg_temp_free_i32(tmp);
  7797. break;
  7798. case 0x0b:
  7799. if (set_cc) {
  7800. gen_add_CC(tmp, tmp, tmp2);
  7801. }
  7802. tcg_temp_free_i32(tmp);
  7803. break;
  7804. case 0x0c:
  7805. tcg_gen_or_i32(tmp, tmp, tmp2);
  7806. if (logic_cc) {
  7807. gen_logic_CC(tmp);
  7808. }
  7809. store_reg_bx(s, rd, tmp);
  7810. break;
  7811. case 0x0d:
  7812. if (logic_cc && rd == 15) {
  7813. /* MOVS r15, ... is used for exception return. */
  7814. if (IS_USER(s)) {
  7815. goto illegal_op;
  7816. }
  7817. gen_exception_return(s, tmp2);
  7818. } else {
  7819. if (logic_cc) {
  7820. gen_logic_CC(tmp2);
  7821. }
  7822. store_reg_bx(s, rd, tmp2);
  7823. }
  7824. break;
  7825. case 0x0e:
  7826. tcg_gen_andc_i32(tmp, tmp, tmp2);
  7827. if (logic_cc) {
  7828. gen_logic_CC(tmp);
  7829. }
  7830. store_reg_bx(s, rd, tmp);
  7831. break;
  7832. default:
  7833. case 0x0f:
  7834. tcg_gen_not_i32(tmp2, tmp2);
  7835. if (logic_cc) {
  7836. gen_logic_CC(tmp2);
  7837. }
  7838. store_reg_bx(s, rd, tmp2);
  7839. break;
  7840. }
  7841. if (op1 != 0x0f && op1 != 0x0d) {
  7842. tcg_temp_free_i32(tmp2);
  7843. }
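/* Sketch of the add/subtract-with-carry arithmetic generated above
 * (hypothetical helper names): ADC adds the carry flag, while SBC and RSC
 * subtract the inverted carry, i.e. Rn - Op2 - (1 - C). */
#if 0
#include <stdint.h>

static uint32_t adc_example(uint32_t a, uint32_t b, unsigned carry)
{
    return a + b + (carry & 1);
}

static uint32_t sbc_example(uint32_t a, uint32_t b, unsigned carry)
{
    return a - b - (1u - (carry & 1));
}
#endif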
  7844. } else {
  7845. /* other instructions */
  7846. op1 = (insn >> 24) & 0xf;
7847. switch (op1) {
  7848. case 0x0:
  7849. case 0x1:
  7850. /* multiplies, extra load/stores */
  7851. sh = (insn >> 5) & 3;
  7852. if (sh == 0) {
  7853. if (op1 == 0x0) {
  7854. rd = (insn >> 16) & 0xf;
  7855. rn = (insn >> 12) & 0xf;
  7856. rs = (insn >> 8) & 0xf;
  7857. rm = (insn) & 0xf;
  7858. op1 = (insn >> 20) & 0xf;
  7859. switch (op1) {
  7860. case 0: case 1: case 2: case 3: case 6:
  7861. /* 32 bit mul */
  7862. tmp = load_reg(s, rs);
  7863. tmp2 = load_reg(s, rm);
  7864. tcg_gen_mul_i32(tmp, tmp, tmp2);
  7865. tcg_temp_free_i32(tmp2);
  7866. if (insn & (1 << 22)) {
  7867. /* Subtract (mls) */
  7868. ARCH(6T2);
  7869. tmp2 = load_reg(s, rn);
  7870. tcg_gen_sub_i32(tmp, tmp2, tmp);
  7871. tcg_temp_free_i32(tmp2);
  7872. } else if (insn & (1 << 21)) {
  7873. /* Add */
  7874. tmp2 = load_reg(s, rn);
  7875. tcg_gen_add_i32(tmp, tmp, tmp2);
  7876. tcg_temp_free_i32(tmp2);
  7877. }
  7878. if (insn & (1 << 20))
  7879. gen_logic_CC(tmp);
  7880. store_reg(s, rd, tmp);
  7881. break;
  7882. case 4:
  7883. /* 64 bit mul double accumulate (UMAAL) */
  7884. ARCH(6);
  7885. tmp = load_reg(s, rs);
  7886. tmp2 = load_reg(s, rm);
  7887. tmp64 = gen_mulu_i64_i32(tmp, tmp2);
  7888. gen_addq_lo(s, tmp64, rn);
  7889. gen_addq_lo(s, tmp64, rd);
  7890. gen_storeq_reg(s, rn, rd, tmp64);
  7891. tcg_temp_free_i64(tmp64);
  7892. break;
  7893. case 8: case 9: case 10: case 11:
  7894. case 12: case 13: case 14: case 15:
  7895. /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
  7896. tmp = load_reg(s, rs);
  7897. tmp2 = load_reg(s, rm);
  7898. if (insn & (1 << 22)) {
  7899. tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
  7900. } else {
  7901. tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
  7902. }
  7903. if (insn & (1 << 21)) { /* mult accumulate */
  7904. TCGv_i32 al = load_reg(s, rn);
  7905. TCGv_i32 ah = load_reg(s, rd);
  7906. tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
  7907. tcg_temp_free_i32(al);
  7908. tcg_temp_free_i32(ah);
  7909. }
  7910. if (insn & (1 << 20)) {
  7911. gen_logicq_cc(tmp, tmp2);
  7912. }
  7913. store_reg(s, rn, tmp);
  7914. store_reg(s, rd, tmp2);
  7915. break;
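/* Sketch (hypothetical helper): the long multiplies above produce a 64-bit
 * product split across RdLo/RdHi; the accumulate forms add in the existing
 * RdHi:RdLo pair, which is what tcg_gen_add2_i32 does on the split 32-bit
 * halves. */
#if 0
#include <stdint.h>

static void umlal_example(uint32_t rm, uint32_t rs,
                          uint32_t *rdlo, uint32_t *rdhi)
{
    uint64_t acc = ((uint64_t)*rdhi << 32) | *rdlo;
    acc += (uint64_t)rm * rs;
    *rdlo = (uint32_t)acc;
    *rdhi = (uint32_t)(acc >> 32);
}
#endif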
  7916. default:
  7917. goto illegal_op;
  7918. }
  7919. } else {
  7920. rn = (insn >> 16) & 0xf;
  7921. rd = (insn >> 12) & 0xf;
  7922. if (insn & (1 << 23)) {
  7923. /* load/store exclusive */
  7924. int op2 = (insn >> 8) & 3;
  7925. op1 = (insn >> 21) & 0x3;
  7926. switch (op2) {
  7927. case 0: /* lda/stl */
  7928. if (op1 == 1) {
  7929. goto illegal_op;
  7930. }
  7931. ARCH(8);
  7932. break;
  7933. case 1: /* reserved */
  7934. goto illegal_op;
  7935. case 2: /* ldaex/stlex */
  7936. ARCH(8);
  7937. break;
  7938. case 3: /* ldrex/strex */
  7939. if (op1) {
  7940. ARCH(6K);
  7941. } else {
  7942. ARCH(6);
  7943. }
  7944. break;
  7945. }
  7946. addr = tcg_temp_local_new_i32();
  7947. load_reg_var(s, addr, rn);
  7948. /* Since the emulation does not have barriers,
  7949. the acquire/release semantics need no special
  7950. handling */
  7951. if (op2 == 0) {
  7952. if (insn & (1 << 20)) {
  7953. tmp = tcg_temp_new_i32();
  7954. switch (op1) {
  7955. case 0: /* lda */
  7956. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  7957. break;
  7958. case 2: /* ldab */
  7959. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  7960. break;
  7961. case 3: /* ldah */
  7962. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  7963. break;
  7964. default:
  7965. abort();
  7966. }
  7967. store_reg(s, rd, tmp);
  7968. } else {
  7969. rm = insn & 0xf;
  7970. tmp = load_reg(s, rm);
  7971. switch (op1) {
  7972. case 0: /* stl */
  7973. gen_aa32_st32(tmp, addr, get_mem_index(s));
  7974. break;
  7975. case 2: /* stlb */
  7976. gen_aa32_st8(tmp, addr, get_mem_index(s));
  7977. break;
  7978. case 3: /* stlh */
  7979. gen_aa32_st16(tmp, addr, get_mem_index(s));
  7980. break;
  7981. default:
  7982. abort();
  7983. }
  7984. tcg_temp_free_i32(tmp);
  7985. }
  7986. } else if (insn & (1 << 20)) {
  7987. switch (op1) {
  7988. case 0: /* ldrex */
  7989. gen_load_exclusive(s, rd, 15, addr, 2);
  7990. break;
  7991. case 1: /* ldrexd */
  7992. gen_load_exclusive(s, rd, rd + 1, addr, 3);
  7993. break;
  7994. case 2: /* ldrexb */
  7995. gen_load_exclusive(s, rd, 15, addr, 0);
  7996. break;
  7997. case 3: /* ldrexh */
  7998. gen_load_exclusive(s, rd, 15, addr, 1);
  7999. break;
  8000. default:
  8001. abort();
  8002. }
  8003. } else {
  8004. rm = insn & 0xf;
  8005. switch (op1) {
  8006. case 0: /* strex */
  8007. gen_store_exclusive(s, rd, rm, 15, addr, 2);
  8008. break;
  8009. case 1: /* strexd */
  8010. gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
  8011. break;
  8012. case 2: /* strexb */
  8013. gen_store_exclusive(s, rd, rm, 15, addr, 0);
  8014. break;
  8015. case 3: /* strexh */
  8016. gen_store_exclusive(s, rd, rm, 15, addr, 1);
  8017. break;
  8018. default:
  8019. abort();
  8020. }
  8021. }
  8022. tcg_temp_free_i32(addr);
  8023. } else {
  8024. /* SWP instruction */
  8025. rm = (insn) & 0xf;
  8026. /* ??? This is not really atomic. However we know
  8027. we never have multiple CPUs running in parallel,
  8028. so it is good enough. */
  8029. addr = load_reg(s, rn);
  8030. tmp = load_reg(s, rm);
  8031. tmp2 = tcg_temp_new_i32();
  8032. if (insn & (1 << 22)) {
  8033. gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
  8034. gen_aa32_st8(tmp, addr, get_mem_index(s));
  8035. } else {
  8036. gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
  8037. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8038. }
  8039. tcg_temp_free_i32(tmp);
  8040. tcg_temp_free_i32(addr);
  8041. store_reg(s, rd, tmp2);
  8042. }
  8043. }
  8044. } else {
  8045. int address_offset;
  8046. bool load = insn & (1 << 20);
  8047. bool doubleword = false;
  8048. /* Misc load/store */
  8049. rn = (insn >> 16) & 0xf;
  8050. rd = (insn >> 12) & 0xf;
  8051. if (!load && (sh & 2)) {
  8052. /* doubleword */
  8053. ARCH(5TE);
  8054. if (rd & 1) {
  8055. /* UNPREDICTABLE; we choose to UNDEF */
  8056. goto illegal_op;
  8057. }
  8058. load = (sh & 1) == 0;
  8059. doubleword = true;
  8060. }
  8061. addr = load_reg(s, rn);
  8062. if (insn & (1 << 24))
  8063. gen_add_datah_offset(s, insn, 0, addr);
  8064. address_offset = 0;
  8065. if (doubleword) {
  8066. if (!load) {
  8067. /* store */
  8068. tmp = load_reg(s, rd);
  8069. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8070. tcg_temp_free_i32(tmp);
  8071. tcg_gen_addi_i32(addr, addr, 4);
  8072. tmp = load_reg(s, rd + 1);
  8073. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8074. tcg_temp_free_i32(tmp);
  8075. } else {
  8076. /* load */
  8077. tmp = tcg_temp_new_i32();
  8078. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8079. store_reg(s, rd, tmp);
  8080. tcg_gen_addi_i32(addr, addr, 4);
  8081. tmp = tcg_temp_new_i32();
  8082. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8083. rd++;
  8084. }
  8085. address_offset = -4;
  8086. } else if (load) {
  8087. /* load */
  8088. tmp = tcg_temp_new_i32();
  8089. switch (sh) {
  8090. case 1:
  8091. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  8092. break;
  8093. case 2:
  8094. gen_aa32_ld8s(tmp, addr, get_mem_index(s));
  8095. break;
  8096. default:
  8097. case 3:
  8098. gen_aa32_ld16s(tmp, addr, get_mem_index(s));
  8099. break;
  8100. }
  8101. } else {
  8102. /* store */
  8103. tmp = load_reg(s, rd);
  8104. gen_aa32_st16(tmp, addr, get_mem_index(s));
  8105. tcg_temp_free_i32(tmp);
  8106. }
  8107. /* Perform base writeback before the loaded value to
  8108. ensure correct behavior with overlapping index registers.
  8109. ldrd with base writeback is undefined if the
  8110. destination and index registers overlap. */
  8111. if (!(insn & (1 << 24))) {
  8112. gen_add_datah_offset(s, insn, address_offset, addr);
  8113. store_reg(s, rn, addr);
  8114. } else if (insn & (1 << 21)) {
  8115. if (address_offset)
  8116. tcg_gen_addi_i32(addr, addr, address_offset);
  8117. store_reg(s, rn, addr);
  8118. } else {
  8119. tcg_temp_free_i32(addr);
  8120. }
  8121. if (load) {
  8122. /* Complete the load. */
  8123. store_reg(s, rd, tmp);
  8124. }
  8125. }
  8126. break;
  8127. case 0x4:
  8128. case 0x5:
  8129. goto do_ldst;
  8130. case 0x6:
  8131. case 0x7:
  8132. if (insn & (1 << 4)) {
  8133. ARCH(6);
  8134. /* Armv6 Media instructions. */
  8135. rm = insn & 0xf;
  8136. rn = (insn >> 16) & 0xf;
  8137. rd = (insn >> 12) & 0xf;
  8138. rs = (insn >> 8) & 0xf;
  8139. switch ((insn >> 23) & 3) {
  8140. case 0: /* Parallel add/subtract. */
  8141. op1 = (insn >> 20) & 7;
  8142. tmp = load_reg(s, rn);
  8143. tmp2 = load_reg(s, rm);
  8144. sh = (insn >> 5) & 7;
  8145. if ((op1 & 3) == 0 || sh == 5 || sh == 6)
  8146. goto illegal_op;
  8147. gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
  8148. tcg_temp_free_i32(tmp2);
  8149. store_reg(s, rd, tmp);
  8150. break;
  8151. case 1:
  8152. if ((insn & 0x00700020) == 0) {
  8153. /* Halfword pack. */
  8154. tmp = load_reg(s, rn);
  8155. tmp2 = load_reg(s, rm);
  8156. shift = (insn >> 7) & 0x1f;
  8157. if (insn & (1 << 6)) {
  8158. /* pkhtb */
  8159. if (shift == 0)
  8160. shift = 31;
  8161. tcg_gen_sari_i32(tmp2, tmp2, shift);
  8162. tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
  8163. tcg_gen_ext16u_i32(tmp2, tmp2);
  8164. } else {
  8165. /* pkhbt */
  8166. if (shift)
  8167. tcg_gen_shli_i32(tmp2, tmp2, shift);
  8168. tcg_gen_ext16u_i32(tmp, tmp);
  8169. tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
  8170. }
  8171. tcg_gen_or_i32(tmp, tmp, tmp2);
  8172. tcg_temp_free_i32(tmp2);
  8173. store_reg(s, rd, tmp);
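/* Sketch of the halfword pack just emitted (hypothetical helpers): PKHBT
 * combines the bottom halfword of Rn with the LSL-shifted top halfword of
 * Rm; PKHTB combines the top halfword of Rn with the ASR-shifted bottom
 * halfword of Rm (a shift amount of 0 means ASR #32, treated as 31 above,
 * which yields the same low 16 bits). */
#if 0
#include <stdint.h>

static uint32_t pkhbt_example(uint32_t rn, uint32_t rm, unsigned lsl)
{
    return (rn & 0x0000ffffu) | ((rm << lsl) & 0xffff0000u);
}

static uint32_t pkhtb_example(uint32_t rn, uint32_t rm, unsigned asr)
{
    uint32_t low = ((uint32_t)((int32_t)rm >> (asr ? asr : 31))) & 0xffffu;
    return (rn & 0xffff0000u) | low;
}
#endif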
  8174. } else if ((insn & 0x00200020) == 0x00200000) {
  8175. /* [us]sat */
  8176. tmp = load_reg(s, rm);
  8177. shift = (insn >> 7) & 0x1f;
  8178. if (insn & (1 << 6)) {
  8179. if (shift == 0)
  8180. shift = 31;
  8181. tcg_gen_sari_i32(tmp, tmp, shift);
  8182. } else {
  8183. tcg_gen_shli_i32(tmp, tmp, shift);
  8184. }
  8185. sh = (insn >> 16) & 0x1f;
  8186. tmp2 = tcg_const_i32(sh);
  8187. if (insn & (1 << 22))
  8188. gen_helper_usat(tmp, cpu_env, tmp, tmp2);
  8189. else
  8190. gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
  8191. tcg_temp_free_i32(tmp2);
  8192. store_reg(s, rd, tmp);
  8193. } else if ((insn & 0x00300fe0) == 0x00200f20) {
  8194. /* [us]sat16 */
  8195. tmp = load_reg(s, rm);
  8196. sh = (insn >> 16) & 0x1f;
  8197. tmp2 = tcg_const_i32(sh);
  8198. if (insn & (1 << 22))
  8199. gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
  8200. else
  8201. gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
  8202. tcg_temp_free_i32(tmp2);
  8203. store_reg(s, rd, tmp);
  8204. } else if ((insn & 0x00700fe0) == 0x00000fa0) {
  8205. /* Select bytes. */
  8206. tmp = load_reg(s, rn);
  8207. tmp2 = load_reg(s, rm);
  8208. tmp3 = tcg_temp_new_i32();
  8209. tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
  8210. gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
  8211. tcg_temp_free_i32(tmp3);
  8212. tcg_temp_free_i32(tmp2);
  8213. store_reg(s, rd, tmp);
  8214. } else if ((insn & 0x000003e0) == 0x00000060) {
  8215. tmp = load_reg(s, rm);
  8216. shift = (insn >> 10) & 3;
8217. /* ??? In many cases it's not necessary to do a
8218. rotate; a shift is sufficient. */
  8219. if (shift != 0)
  8220. tcg_gen_rotri_i32(tmp, tmp, shift * 8);
  8221. op1 = (insn >> 20) & 7;
  8222. switch (op1) {
  8223. case 0: gen_sxtb16(tmp); break;
  8224. case 2: gen_sxtb(tmp); break;
  8225. case 3: gen_sxth(tmp); break;
  8226. case 4: gen_uxtb16(tmp); break;
  8227. case 6: gen_uxtb(tmp); break;
  8228. case 7: gen_uxth(tmp); break;
  8229. default: goto illegal_op;
  8230. }
  8231. if (rn != 15) {
  8232. tmp2 = load_reg(s, rn);
  8233. if ((op1 & 3) == 0) {
  8234. gen_add16(tmp, tmp2);
  8235. } else {
  8236. tcg_gen_add_i32(tmp, tmp, tmp2);
  8237. tcg_temp_free_i32(tmp2);
  8238. }
  8239. }
  8240. store_reg(s, rd, tmp);
  8241. } else if ((insn & 0x003f0f60) == 0x003f0f20) {
  8242. /* rev */
  8243. tmp = load_reg(s, rm);
  8244. if (insn & (1 << 22)) {
  8245. if (insn & (1 << 7)) {
  8246. gen_revsh(tmp);
  8247. } else {
  8248. ARCH(6T2);
  8249. gen_helper_rbit(tmp, tmp);
  8250. }
  8251. } else {
  8252. if (insn & (1 << 7))
  8253. gen_rev16(tmp);
  8254. else
  8255. tcg_gen_bswap32_i32(tmp, tmp);
  8256. }
  8257. store_reg(s, rd, tmp);
  8258. } else {
  8259. goto illegal_op;
  8260. }
  8261. break;
  8262. case 2: /* Multiplies (Type 3). */
  8263. switch ((insn >> 20) & 0x7) {
  8264. case 5:
  8265. if (((insn >> 6) ^ (insn >> 7)) & 1) {
  8266. /* op2 not 00x or 11x : UNDEF */
  8267. goto illegal_op;
  8268. }
  8269. /* Signed multiply most significant [accumulate].
  8270. (SMMUL, SMMLA, SMMLS) */
  8271. tmp = load_reg(s, rm);
  8272. tmp2 = load_reg(s, rs);
  8273. tmp64 = gen_muls_i64_i32(tmp, tmp2);
  8274. if (rd != 15) {
  8275. tmp = load_reg(s, rd);
  8276. if (insn & (1 << 6)) {
  8277. tmp64 = gen_subq_msw(tmp64, tmp);
  8278. } else {
  8279. tmp64 = gen_addq_msw(tmp64, tmp);
  8280. }
  8281. }
  8282. if (insn & (1 << 5)) {
  8283. tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
  8284. }
  8285. tcg_gen_shri_i64(tmp64, tmp64, 32);
  8286. tmp = tcg_temp_new_i32();
  8287. tcg_gen_extrl_i64_i32(tmp, tmp64);
  8288. tcg_temp_free_i64(tmp64);
  8289. store_reg(s, rn, tmp);
  8290. break;
  8291. case 0:
  8292. case 4:
  8293. /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
  8294. if (insn & (1 << 7)) {
  8295. goto illegal_op;
  8296. }
  8297. tmp = load_reg(s, rm);
  8298. tmp2 = load_reg(s, rs);
  8299. if (insn & (1 << 5))
  8300. gen_swap_half(tmp2);
  8301. gen_smul_dual(tmp, tmp2);
  8302. if (insn & (1 << 22)) {
  8303. /* smlald, smlsld */
  8304. TCGv_i64 tmp64_2;
  8305. tmp64 = tcg_temp_new_i64();
  8306. tmp64_2 = tcg_temp_new_i64();
  8307. tcg_gen_ext_i32_i64(tmp64, tmp);
  8308. tcg_gen_ext_i32_i64(tmp64_2, tmp2);
  8309. tcg_temp_free_i32(tmp);
  8310. tcg_temp_free_i32(tmp2);
  8311. if (insn & (1 << 6)) {
  8312. tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
  8313. } else {
  8314. tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
  8315. }
  8316. tcg_temp_free_i64(tmp64_2);
  8317. gen_addq(s, tmp64, rd, rn);
  8318. gen_storeq_reg(s, rd, rn, tmp64);
  8319. tcg_temp_free_i64(tmp64);
  8320. } else {
  8321. /* smuad, smusd, smlad, smlsd */
  8322. if (insn & (1 << 6)) {
  8323. /* This subtraction cannot overflow. */
  8324. tcg_gen_sub_i32(tmp, tmp, tmp2);
  8325. } else {
  8326. /* This addition cannot overflow 32 bits;
  8327. * however it may overflow considered as a
  8328. * signed operation, in which case we must set
  8329. * the Q flag.
  8330. */
  8331. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  8332. }
  8333. tcg_temp_free_i32(tmp2);
  8334. if (rd != 15)
  8335. {
  8336. tmp2 = load_reg(s, rd);
  8337. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  8338. tcg_temp_free_i32(tmp2);
  8339. }
  8340. store_reg(s, rn, tmp);
  8341. }
  8342. break;
  8343. case 1:
  8344. case 3:
  8345. /* SDIV, UDIV */
  8346. if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
  8347. goto illegal_op;
  8348. }
  8349. if (((insn >> 5) & 7) || (rd != 15)) {
  8350. goto illegal_op;
  8351. }
  8352. tmp = load_reg(s, rm);
  8353. tmp2 = load_reg(s, rs);
  8354. if (insn & (1 << 21)) {
  8355. gen_helper_udiv(tmp, tmp, tmp2);
  8356. } else {
  8357. gen_helper_sdiv(tmp, tmp, tmp2);
  8358. }
  8359. tcg_temp_free_i32(tmp2);
  8360. store_reg(s, rn, tmp);
  8361. break;
  8362. default:
  8363. goto illegal_op;
  8364. }
  8365. break;
  8366. case 3:
  8367. op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
  8368. switch (op1) {
  8369. case 0: /* Unsigned sum of absolute differences. */
  8370. ARCH(6);
  8371. tmp = load_reg(s, rm);
  8372. tmp2 = load_reg(s, rs);
  8373. gen_helper_usad8(tmp, tmp, tmp2);
  8374. tcg_temp_free_i32(tmp2);
  8375. if (rd != 15) {
  8376. tmp2 = load_reg(s, rd);
  8377. tcg_gen_add_i32(tmp, tmp, tmp2);
  8378. tcg_temp_free_i32(tmp2);
  8379. }
  8380. store_reg(s, rn, tmp);
  8381. break;
  8382. case 0x20: case 0x24: case 0x28: case 0x2c:
  8383. /* Bitfield insert/clear. */
  8384. ARCH(6T2);
  8385. shift = (insn >> 7) & 0x1f;
  8386. i = (insn >> 16) & 0x1f;
  8387. if (i < shift) {
  8388. /* UNPREDICTABLE; we choose to UNDEF */
  8389. goto illegal_op;
  8390. }
  8391. i = i + 1 - shift;
  8392. if (rm == 15) {
  8393. tmp = tcg_temp_new_i32();
  8394. tcg_gen_movi_i32(tmp, 0);
  8395. } else {
  8396. tmp = load_reg(s, rm);
  8397. }
  8398. if (i != 32) {
  8399. tmp2 = load_reg(s, rd);
  8400. tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
  8401. tcg_temp_free_i32(tmp2);
  8402. }
  8403. store_reg(s, rd, tmp);
  8404. break;
  8405. case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
  8406. case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
  8407. ARCH(6T2);
  8408. tmp = load_reg(s, rm);
  8409. shift = (insn >> 7) & 0x1f;
  8410. i = ((insn >> 16) & 0x1f) + 1;
  8411. if (shift + i > 32)
  8412. goto illegal_op;
  8413. if (i < 32) {
  8414. if (op1 & 0x20) {
  8415. gen_ubfx(tmp, shift, (1u << i) - 1);
  8416. } else {
  8417. gen_sbfx(tmp, shift, i);
  8418. }
  8419. }
  8420. store_reg(s, rd, tmp);
  8421. break;
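/* Sketch of the bitfield operations above (hypothetical helpers): BFI
 * inserts the low 'width' bits of Rm into Rd at bit 'lsb' (which is what
 * tcg_gen_deposit_i32 expresses); UBFX/SBFX extract 'width' bits starting
 * at 'lsb' with zero or sign extension.  Assumes lsb + width <= 32. */
#if 0
#include <stdint.h>

static uint32_t bfi_example(uint32_t rd, uint32_t rm,
                            unsigned lsb, unsigned width)
{
    uint32_t mask = (width == 32) ? ~0u : (((1u << width) - 1) << lsb);
    return (rd & ~mask) | ((rm << lsb) & mask);
}

static int32_t sbfx_example(uint32_t rm, unsigned lsb, unsigned width)
{
    return (int32_t)(rm << (32 - lsb - width)) >> (32 - width);
}
#endif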
  8422. default:
  8423. goto illegal_op;
  8424. }
  8425. break;
  8426. }
  8427. break;
  8428. }
  8429. do_ldst:
8430. /* Check for undefined extension instructions,
8431. * per the ARM Architecture Reference Manual, i.e.:
  8432. * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
  8433. */
  8434. sh = (0xf << 20) | (0xf << 4);
  8435. if (op1 == 0x7 && ((insn & sh) == sh))
  8436. {
  8437. goto illegal_op;
  8438. }
  8439. /* load/store byte/word */
  8440. rn = (insn >> 16) & 0xf;
  8441. rd = (insn >> 12) & 0xf;
  8442. tmp2 = load_reg(s, rn);
  8443. if ((insn & 0x01200000) == 0x00200000) {
  8444. /* ldrt/strt */
  8445. i = get_a32_user_mem_index(s);
  8446. } else {
  8447. i = get_mem_index(s);
  8448. }
  8449. if (insn & (1 << 24))
  8450. gen_add_data_offset(s, insn, tmp2);
  8451. if (insn & (1 << 20)) {
  8452. /* load */
  8453. tmp = tcg_temp_new_i32();
  8454. if (insn & (1 << 22)) {
  8455. gen_aa32_ld8u(tmp, tmp2, i);
  8456. } else {
  8457. gen_aa32_ld32u(tmp, tmp2, i);
  8458. }
  8459. } else {
  8460. /* store */
  8461. tmp = load_reg(s, rd);
  8462. if (insn & (1 << 22)) {
  8463. gen_aa32_st8(tmp, tmp2, i);
  8464. } else {
  8465. gen_aa32_st32(tmp, tmp2, i);
  8466. }
  8467. tcg_temp_free_i32(tmp);
  8468. }
  8469. if (!(insn & (1 << 24))) {
  8470. gen_add_data_offset(s, insn, tmp2);
  8471. store_reg(s, rn, tmp2);
  8472. } else if (insn & (1 << 21)) {
  8473. store_reg(s, rn, tmp2);
  8474. } else {
  8475. tcg_temp_free_i32(tmp2);
  8476. }
  8477. if (insn & (1 << 20)) {
  8478. /* Complete the load. */
  8479. store_reg_from_load(s, rd, tmp);
  8480. }
  8481. break;
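/* Sketch of the single load/store addressing above (hypothetical helper,
 * immediate-offset form only, ignoring the U/subtract bit): P (bit 24)
 * selects pre- vs post-indexing, and W (bit 21) requests writeback for the
 * pre-indexed form; post-indexed accesses always update the base.  The
 * P=0, W=1 encoding is the unprivileged LDRT/STRT form, which above simply
 * selects the user memory index. */
#if 0
#include <stdint.h>

static uint32_t ldr_str_address_example(uint32_t *rn, uint32_t offset,
                                        int p, int w)
{
    uint32_t addr = *rn;
    if (p) {
        addr += offset;        /* pre-indexed: offset applied before access */
        if (w) {
            *rn = addr;        /* optional base writeback */
        }
    } else {
        *rn = addr + offset;   /* post-indexed: base always written back */
    }
    return addr;               /* address used for the memory access */
}
#endif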
  8482. case 0x08:
  8483. case 0x09:
  8484. {
  8485. int j, n, loaded_base;
  8486. bool exc_return = false;
  8487. bool is_load = extract32(insn, 20, 1);
  8488. bool user = false;
  8489. TCGv_i32 loaded_var;
  8490. /* load/store multiple words */
  8491. /* XXX: store correct base if write back */
  8492. if (insn & (1 << 22)) {
  8493. /* LDM (user), LDM (exception return) and STM (user) */
  8494. if (IS_USER(s))
  8495. goto illegal_op; /* only usable in supervisor mode */
  8496. if (is_load && extract32(insn, 15, 1)) {
  8497. exc_return = true;
  8498. } else {
  8499. user = true;
  8500. }
  8501. }
  8502. rn = (insn >> 16) & 0xf;
  8503. addr = load_reg(s, rn);
  8504. /* compute total size */
  8505. loaded_base = 0;
  8506. TCGV_UNUSED_I32(loaded_var);
  8507. n = 0;
8508. for (i = 0; i < 16; i++) {
  8509. if (insn & (1 << i))
  8510. n++;
  8511. }
  8512. /* XXX: test invalid n == 0 case ? */
  8513. if (insn & (1 << 23)) {
  8514. if (insn & (1 << 24)) {
  8515. /* pre increment */
  8516. tcg_gen_addi_i32(addr, addr, 4);
  8517. } else {
  8518. /* post increment */
  8519. }
  8520. } else {
  8521. if (insn & (1 << 24)) {
  8522. /* pre decrement */
  8523. tcg_gen_addi_i32(addr, addr, -(n * 4));
  8524. } else {
  8525. /* post decrement */
  8526. if (n != 1)
  8527. tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
  8528. }
  8529. }
  8530. j = 0;
8531. for (i = 0; i < 16; i++) {
  8532. if (insn & (1 << i)) {
  8533. if (is_load) {
  8534. /* load */
  8535. tmp = tcg_temp_new_i32();
  8536. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8537. if (user) {
  8538. tmp2 = tcg_const_i32(i);
  8539. gen_helper_set_user_reg(cpu_env, tmp2, tmp);
  8540. tcg_temp_free_i32(tmp2);
  8541. tcg_temp_free_i32(tmp);
  8542. } else if (i == rn) {
  8543. loaded_var = tmp;
  8544. loaded_base = 1;
  8545. } else {
  8546. store_reg_from_load(s, i, tmp);
  8547. }
  8548. } else {
  8549. /* store */
  8550. if (i == 15) {
  8551. /* special case: r15 = PC + 8 */
  8552. val = (long)s->pc + 4;
  8553. tmp = tcg_temp_new_i32();
  8554. tcg_gen_movi_i32(tmp, val);
  8555. } else if (user) {
  8556. tmp = tcg_temp_new_i32();
  8557. tmp2 = tcg_const_i32(i);
  8558. gen_helper_get_user_reg(tmp, cpu_env, tmp2);
  8559. tcg_temp_free_i32(tmp2);
  8560. } else {
  8561. tmp = load_reg(s, i);
  8562. }
  8563. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8564. tcg_temp_free_i32(tmp);
  8565. }
  8566. j++;
  8567. /* no need to add after the last transfer */
  8568. if (j != n)
  8569. tcg_gen_addi_i32(addr, addr, 4);
  8570. }
  8571. }
  8572. if (insn & (1 << 21)) {
  8573. /* write back */
  8574. if (insn & (1 << 23)) {
  8575. if (insn & (1 << 24)) {
  8576. /* pre increment */
  8577. } else {
  8578. /* post increment */
  8579. tcg_gen_addi_i32(addr, addr, 4);
  8580. }
  8581. } else {
  8582. if (insn & (1 << 24)) {
  8583. /* pre decrement */
  8584. if (n != 1)
  8585. tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
  8586. } else {
  8587. /* post decrement */
  8588. tcg_gen_addi_i32(addr, addr, -(n * 4));
  8589. }
  8590. }
  8591. store_reg(s, rn, addr);
  8592. } else {
  8593. tcg_temp_free_i32(addr);
  8594. }
  8595. if (loaded_base) {
  8596. store_reg(s, rn, loaded_var);
  8597. }
  8598. if (exc_return) {
  8599. /* Restore CPSR from SPSR. */
  8600. tmp = load_cpu_field(spsr);
  8601. gen_set_cpsr(tmp, CPSR_ERET_MASK);
  8602. tcg_temp_free_i32(tmp);
  8603. s->is_jmp = DISAS_UPDATE;
  8604. }
  8605. }
  8606. break;
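/* Sketch (hypothetical helper): start address for the four LDM/STM
 * addressing modes with n registers in the list.  U (bit 23) selects
 * increment vs decrement and P (bit 24) selects before vs after, matching
 * the pre/post adjustments made to 'addr' above. */
#if 0
#include <stdint.h>

static uint32_t ldm_start_address_example(uint32_t base, int n, int u, int p)
{
    if (u) {
        return p ? base + 4 : base;                    /* IB : IA */
    }
    return p ? base - 4u * n : base - 4u * (n - 1);    /* DB : DA */
}
#endif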
  8607. case 0xa:
  8608. case 0xb:
  8609. {
  8610. int32_t offset;
  8611. /* branch (and link) */
  8612. val = (int32_t)s->pc;
  8613. if (insn & (1 << 24)) {
  8614. tmp = tcg_temp_new_i32();
  8615. tcg_gen_movi_i32(tmp, val);
  8616. store_reg(s, 14, tmp);
  8617. }
  8618. offset = sextract32(insn << 2, 0, 26);
  8619. val += offset + 4;
  8620. gen_jmp(s, val);
  8621. }
  8622. break;
  8623. case 0xc:
  8624. case 0xd:
  8625. case 0xe:
  8626. if (((insn >> 8) & 0xe) == 10) {
  8627. /* VFP. */
  8628. if (disas_vfp_insn(s, insn)) {
  8629. goto illegal_op;
  8630. }
  8631. } else if (disas_coproc_insn(s, insn)) {
  8632. /* Coprocessor. */
  8633. goto illegal_op;
  8634. }
  8635. break;
  8636. case 0xf:
  8637. /* swi */
  8638. gen_set_pc_im(s, s->pc);
  8639. s->svc_imm = extract32(insn, 0, 24);
  8640. s->is_jmp = DISAS_SWI;
  8641. break;
  8642. default:
  8643. illegal_op:
  8644. gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
  8645. default_exception_el(s));
  8646. break;
  8647. }
  8648. }
  8649. }
  8650. /* Return true if this is a Thumb-2 logical op. */
  8651. static int
  8652. thumb2_logic_op(int op)
  8653. {
  8654. return (op < 8);
  8655. }
  8656. /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
  8657. then set condition code flags based on the result of the operation.
  8658. If SHIFTER_OUT is nonzero then set the carry flag for logical operations
  8659. to the high bit of T1.
  8660. Returns zero if the opcode is valid. */
  8661. static int
  8662. gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
  8663. TCGv_i32 t0, TCGv_i32 t1)
  8664. {
  8665. int logic_cc;
  8666. logic_cc = 0;
  8667. switch (op) {
  8668. case 0: /* and */
  8669. tcg_gen_and_i32(t0, t0, t1);
  8670. logic_cc = conds;
  8671. break;
  8672. case 1: /* bic */
  8673. tcg_gen_andc_i32(t0, t0, t1);
  8674. logic_cc = conds;
  8675. break;
  8676. case 2: /* orr */
  8677. tcg_gen_or_i32(t0, t0, t1);
  8678. logic_cc = conds;
  8679. break;
  8680. case 3: /* orn */
  8681. tcg_gen_orc_i32(t0, t0, t1);
  8682. logic_cc = conds;
  8683. break;
  8684. case 4: /* eor */
  8685. tcg_gen_xor_i32(t0, t0, t1);
  8686. logic_cc = conds;
  8687. break;
  8688. case 8: /* add */
  8689. if (conds)
  8690. gen_add_CC(t0, t0, t1);
  8691. else
  8692. tcg_gen_add_i32(t0, t0, t1);
  8693. break;
  8694. case 10: /* adc */
  8695. if (conds)
  8696. gen_adc_CC(t0, t0, t1);
  8697. else
  8698. gen_adc(t0, t1);
  8699. break;
  8700. case 11: /* sbc */
  8701. if (conds) {
  8702. gen_sbc_CC(t0, t0, t1);
  8703. } else {
  8704. gen_sub_carry(t0, t0, t1);
  8705. }
  8706. break;
  8707. case 13: /* sub */
  8708. if (conds)
  8709. gen_sub_CC(t0, t0, t1);
  8710. else
  8711. tcg_gen_sub_i32(t0, t0, t1);
  8712. break;
  8713. case 14: /* rsb */
  8714. if (conds)
  8715. gen_sub_CC(t0, t1, t0);
  8716. else
  8717. tcg_gen_sub_i32(t0, t1, t0);
  8718. break;
  8719. default: /* 5, 6, 7, 9, 12, 15. */
  8720. return 1;
  8721. }
  8722. if (logic_cc) {
  8723. gen_logic_CC(t0);
  8724. if (shifter_out)
  8725. gen_set_CF_bit31(t1);
  8726. }
  8727. return 0;
  8728. }
  8729. /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
  8730. is not legal. */
  8731. static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
  8732. {
  8733. uint32_t insn, imm, shift, offset;
  8734. uint32_t rd, rn, rm, rs;
  8735. TCGv_i32 tmp;
  8736. TCGv_i32 tmp2;
  8737. TCGv_i32 tmp3;
  8738. TCGv_i32 addr;
  8739. TCGv_i64 tmp64;
  8740. int op;
  8741. int shiftop;
  8742. int conds;
  8743. int logic_cc;
  8744. if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
  8745. || arm_dc_feature(s, ARM_FEATURE_M))) {
  8746. /* Thumb-1 cores may need to treat bl and blx as a pair of
  8747. 16-bit instructions to get correct prefetch abort behavior. */
  8748. insn = insn_hw1;
  8749. if ((insn & (1 << 12)) == 0) {
  8750. ARCH(5);
  8751. /* Second half of blx. */
  8752. offset = ((insn & 0x7ff) << 1);
  8753. tmp = load_reg(s, 14);
  8754. tcg_gen_addi_i32(tmp, tmp, offset);
  8755. tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
  8756. tmp2 = tcg_temp_new_i32();
  8757. tcg_gen_movi_i32(tmp2, s->pc | 1);
  8758. store_reg(s, 14, tmp2);
  8759. gen_bx(s, tmp);
  8760. return 0;
  8761. }
  8762. if (insn & (1 << 11)) {
  8763. /* Second half of bl. */
  8764. offset = ((insn & 0x7ff) << 1) | 1;
  8765. tmp = load_reg(s, 14);
  8766. tcg_gen_addi_i32(tmp, tmp, offset);
  8767. tmp2 = tcg_temp_new_i32();
  8768. tcg_gen_movi_i32(tmp2, s->pc | 1);
  8769. store_reg(s, 14, tmp2);
  8770. gen_bx(s, tmp);
  8771. return 0;
  8772. }
  8773. if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8774. /* Instruction spans a page boundary. Implement it as two
8775. 16-bit instructions in case the second half causes a
8776. prefetch abort. */
  8777. offset = ((int32_t)insn << 21) >> 9;
  8778. tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
  8779. return 0;
  8780. }
  8781. /* Fall through to 32-bit decode. */
  8782. }
  8783. insn = arm_lduw_code(env, s->pc, s->bswap_code);
  8784. s->pc += 2;
  8785. insn |= (uint32_t)insn_hw1 << 16;
  8786. if ((insn & 0xf800e800) != 0xf000e800) {
  8787. ARCH(6T2);
  8788. }
  8789. rn = (insn >> 16) & 0xf;
  8790. rs = (insn >> 12) & 0xf;
  8791. rd = (insn >> 8) & 0xf;
  8792. rm = insn & 0xf;
  8793. switch ((insn >> 25) & 0xf) {
  8794. case 0: case 1: case 2: case 3:
  8795. /* 16-bit instructions. Should never happen. */
  8796. abort();
  8797. case 4:
  8798. if (insn & (1 << 22)) {
  8799. /* Other load/store, table branch. */
  8800. if (insn & 0x01200000) {
  8801. /* Load/store doubleword. */
  8802. if (rn == 15) {
  8803. addr = tcg_temp_new_i32();
  8804. tcg_gen_movi_i32(addr, s->pc & ~3);
  8805. } else {
  8806. addr = load_reg(s, rn);
  8807. }
  8808. offset = (insn & 0xff) * 4;
  8809. if ((insn & (1 << 23)) == 0)
  8810. offset = -offset;
  8811. if (insn & (1 << 24)) {
  8812. tcg_gen_addi_i32(addr, addr, offset);
  8813. offset = 0;
  8814. }
  8815. if (insn & (1 << 20)) {
  8816. /* ldrd */
  8817. tmp = tcg_temp_new_i32();
  8818. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8819. store_reg(s, rs, tmp);
  8820. tcg_gen_addi_i32(addr, addr, 4);
  8821. tmp = tcg_temp_new_i32();
  8822. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8823. store_reg(s, rd, tmp);
  8824. } else {
  8825. /* strd */
  8826. tmp = load_reg(s, rs);
  8827. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8828. tcg_temp_free_i32(tmp);
  8829. tcg_gen_addi_i32(addr, addr, 4);
  8830. tmp = load_reg(s, rd);
  8831. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8832. tcg_temp_free_i32(tmp);
  8833. }
  8834. if (insn & (1 << 21)) {
  8835. /* Base writeback. */
  8836. if (rn == 15)
  8837. goto illegal_op;
  8838. tcg_gen_addi_i32(addr, addr, offset - 4);
  8839. store_reg(s, rn, addr);
  8840. } else {
  8841. tcg_temp_free_i32(addr);
  8842. }
  8843. } else if ((insn & (1 << 23)) == 0) {
  8844. /* Load/store exclusive word. */
  8845. addr = tcg_temp_local_new_i32();
  8846. load_reg_var(s, addr, rn);
  8847. tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
  8848. if (insn & (1 << 20)) {
  8849. gen_load_exclusive(s, rs, 15, addr, 2);
  8850. } else {
  8851. gen_store_exclusive(s, rd, rs, 15, addr, 2);
  8852. }
  8853. tcg_temp_free_i32(addr);
  8854. } else if ((insn & (7 << 5)) == 0) {
  8855. /* Table Branch. */
  8856. if (rn == 15) {
  8857. addr = tcg_temp_new_i32();
  8858. tcg_gen_movi_i32(addr, s->pc);
  8859. } else {
  8860. addr = load_reg(s, rn);
  8861. }
  8862. tmp = load_reg(s, rm);
  8863. tcg_gen_add_i32(addr, addr, tmp);
  8864. if (insn & (1 << 4)) {
  8865. /* tbh */
  8866. tcg_gen_add_i32(addr, addr, tmp);
  8867. tcg_temp_free_i32(tmp);
  8868. tmp = tcg_temp_new_i32();
  8869. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  8870. } else { /* tbb */
  8871. tcg_temp_free_i32(tmp);
  8872. tmp = tcg_temp_new_i32();
  8873. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  8874. }
  8875. tcg_temp_free_i32(addr);
  8876. tcg_gen_shli_i32(tmp, tmp, 1);
  8877. tcg_gen_addi_i32(tmp, tmp, s->pc);
  8878. store_reg(s, 15, tmp);
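/* Sketch of the Thumb-2 table branch just generated (hypothetical helper):
 * TBB reads a byte from the table at Rn indexed by Rm (TBH reads a
 * halfword, hence Rm is added a second time above), doubles it and adds it
 * to the address of the next instruction to form the branch target. */
#if 0
#include <stdint.h>

static uint32_t tbb_target_example(const uint8_t *table, uint32_t rm,
                                   uint32_t next_insn_addr)
{
    return next_insn_addr + 2u * table[rm];
}
#endif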
  8879. } else {
  8880. int op2 = (insn >> 6) & 0x3;
  8881. op = (insn >> 4) & 0x3;
  8882. switch (op2) {
  8883. case 0:
  8884. goto illegal_op;
  8885. case 1:
  8886. /* Load/store exclusive byte/halfword/doubleword */
  8887. if (op == 2) {
  8888. goto illegal_op;
  8889. }
  8890. ARCH(7);
  8891. break;
  8892. case 2:
  8893. /* Load-acquire/store-release */
  8894. if (op == 3) {
  8895. goto illegal_op;
  8896. }
  8897. /* Fall through */
  8898. case 3:
  8899. /* Load-acquire/store-release exclusive */
  8900. ARCH(8);
  8901. break;
  8902. }
  8903. addr = tcg_temp_local_new_i32();
  8904. load_reg_var(s, addr, rn);
  8905. if (!(op2 & 1)) {
  8906. if (insn & (1 << 20)) {
  8907. tmp = tcg_temp_new_i32();
  8908. switch (op) {
  8909. case 0: /* ldab */
  8910. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  8911. break;
  8912. case 1: /* ldah */
  8913. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  8914. break;
  8915. case 2: /* lda */
  8916. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8917. break;
  8918. default:
  8919. abort();
  8920. }
  8921. store_reg(s, rs, tmp);
  8922. } else {
  8923. tmp = load_reg(s, rs);
  8924. switch (op) {
  8925. case 0: /* stlb */
  8926. gen_aa32_st8(tmp, addr, get_mem_index(s));
  8927. break;
  8928. case 1: /* stlh */
  8929. gen_aa32_st16(tmp, addr, get_mem_index(s));
  8930. break;
  8931. case 2: /* stl */
  8932. gen_aa32_st32(tmp, addr, get_mem_index(s));
  8933. break;
  8934. default:
  8935. abort();
  8936. }
  8937. tcg_temp_free_i32(tmp);
  8938. }
  8939. } else if (insn & (1 << 20)) {
  8940. gen_load_exclusive(s, rs, rd, addr, op);
  8941. } else {
  8942. gen_store_exclusive(s, rm, rs, rd, addr, op);
  8943. }
  8944. tcg_temp_free_i32(addr);
  8945. }
  8946. } else {
  8947. /* Load/store multiple, RFE, SRS. */
  8948. if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
  8949. /* RFE, SRS: not available in user mode or on M profile */
  8950. if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
  8951. goto illegal_op;
  8952. }
  8953. if (insn & (1 << 20)) {
  8954. /* rfe */
  8955. addr = load_reg(s, rn);
  8956. if ((insn & (1 << 24)) == 0)
  8957. tcg_gen_addi_i32(addr, addr, -8);
  8958. /* Load PC into tmp and CPSR into tmp2. */
  8959. tmp = tcg_temp_new_i32();
  8960. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  8961. tcg_gen_addi_i32(addr, addr, 4);
  8962. tmp2 = tcg_temp_new_i32();
  8963. gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
  8964. if (insn & (1 << 21)) {
  8965. /* Base writeback. */
  8966. if (insn & (1 << 24)) {
  8967. tcg_gen_addi_i32(addr, addr, 4);
  8968. } else {
  8969. tcg_gen_addi_i32(addr, addr, -4);
  8970. }
  8971. store_reg(s, rn, addr);
  8972. } else {
  8973. tcg_temp_free_i32(addr);
  8974. }
  8975. gen_rfe(s, tmp, tmp2);
  8976. } else {
  8977. /* srs */
  8978. gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
  8979. insn & (1 << 21));
  8980. }
  8981. } else {
  8982. int i, loaded_base = 0;
  8983. TCGv_i32 loaded_var;
  8984. /* Load/store multiple. */
  8985. addr = load_reg(s, rn);
  8986. offset = 0;
  8987. for (i = 0; i < 16; i++) {
  8988. if (insn & (1 << i))
  8989. offset += 4;
  8990. }
  8991. if (insn & (1 << 24)) {
  8992. tcg_gen_addi_i32(addr, addr, -offset);
  8993. }
  8994. TCGV_UNUSED_I32(loaded_var);
  8995. for (i = 0; i < 16; i++) {
  8996. if ((insn & (1 << i)) == 0)
  8997. continue;
  8998. if (insn & (1 << 20)) {
  8999. /* Load. */
  9000. tmp = tcg_temp_new_i32();
  9001. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  9002. if (i == 15) {
  9003. gen_bx(s, tmp);
  9004. } else if (i == rn) {
  9005. loaded_var = tmp;
  9006. loaded_base = 1;
  9007. } else {
  9008. store_reg(s, i, tmp);
  9009. }
  9010. } else {
  9011. /* Store. */
  9012. tmp = load_reg(s, i);
  9013. gen_aa32_st32(tmp, addr, get_mem_index(s));
  9014. tcg_temp_free_i32(tmp);
  9015. }
  9016. tcg_gen_addi_i32(addr, addr, 4);
  9017. }
  9018. if (loaded_base) {
  9019. store_reg(s, rn, loaded_var);
  9020. }
  9021. if (insn & (1 << 21)) {
  9022. /* Base register writeback. */
  9023. if (insn & (1 << 24)) {
  9024. tcg_gen_addi_i32(addr, addr, -offset);
  9025. }
  9026. /* Fault if writeback register is in register list. */
  9027. if (insn & (1 << rn))
  9028. goto illegal_op;
  9029. store_reg(s, rn, addr);
  9030. } else {
  9031. tcg_temp_free_i32(addr);
  9032. }
  9033. }
  9034. }
  9035. break;
  9036. case 5:
  9037. op = (insn >> 21) & 0xf;
  9038. if (op == 6) {
  9039. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9040. goto illegal_op;
  9041. }
  9042. /* Halfword pack. */
  9043. tmp = load_reg(s, rn);
  9044. tmp2 = load_reg(s, rm);
  9045. shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
  9046. if (insn & (1 << 5)) {
  9047. /* pkhtb */
  9048. if (shift == 0)
  9049. shift = 31;
  9050. tcg_gen_sari_i32(tmp2, tmp2, shift);
  9051. tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
  9052. tcg_gen_ext16u_i32(tmp2, tmp2);
  9053. } else {
  9054. /* pkhbt */
  9055. if (shift)
  9056. tcg_gen_shli_i32(tmp2, tmp2, shift);
  9057. tcg_gen_ext16u_i32(tmp, tmp);
  9058. tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
  9059. }
  9060. tcg_gen_or_i32(tmp, tmp, tmp2);
  9061. tcg_temp_free_i32(tmp2);
  9062. store_reg(s, rd, tmp);
  9063. } else {
  9064. /* Data processing register constant shift. */
  9065. if (rn == 15) {
  9066. tmp = tcg_temp_new_i32();
  9067. tcg_gen_movi_i32(tmp, 0);
  9068. } else {
  9069. tmp = load_reg(s, rn);
  9070. }
  9071. tmp2 = load_reg(s, rm);
  9072. shiftop = (insn >> 4) & 3;
  9073. shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
  9074. conds = (insn & (1 << 20)) != 0;
  9075. logic_cc = (conds && thumb2_logic_op(op));
  9076. gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
  9077. if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
  9078. goto illegal_op;
  9079. tcg_temp_free_i32(tmp2);
  9080. if (rd != 15) {
  9081. store_reg(s, rd, tmp);
  9082. } else {
  9083. tcg_temp_free_i32(tmp);
  9084. }
  9085. }
  9086. break;
  9087. case 13: /* Misc data processing. */
  9088. op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
  9089. if (op < 4 && (insn & 0xf000) != 0xf000)
  9090. goto illegal_op;
  9091. switch (op) {
  9092. case 0: /* Register controlled shift. */
  9093. tmp = load_reg(s, rn);
  9094. tmp2 = load_reg(s, rm);
  9095. if ((insn & 0x70) != 0)
  9096. goto illegal_op;
  9097. op = (insn >> 21) & 3;
  9098. logic_cc = (insn & (1 << 20)) != 0;
  9099. gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
  9100. if (logic_cc)
  9101. gen_logic_CC(tmp);
  9102. store_reg_bx(s, rd, tmp);
  9103. break;
  9104. case 1: /* Sign/zero extend. */
  9105. op = (insn >> 20) & 7;
  9106. switch (op) {
  9107. case 0: /* SXTAH, SXTH */
  9108. case 1: /* UXTAH, UXTH */
  9109. case 4: /* SXTAB, SXTB */
  9110. case 5: /* UXTAB, UXTB */
  9111. break;
  9112. case 2: /* SXTAB16, SXTB16 */
  9113. case 3: /* UXTAB16, UXTB16 */
  9114. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9115. goto illegal_op;
  9116. }
  9117. break;
  9118. default:
  9119. goto illegal_op;
  9120. }
  9121. if (rn != 15) {
  9122. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9123. goto illegal_op;
  9124. }
  9125. }
  9126. tmp = load_reg(s, rm);
  9127. shift = (insn >> 4) & 3;
9128. /* ??? In many cases it's not necessary to do a
9129. rotate; a shift is sufficient. */
  9130. if (shift != 0)
  9131. tcg_gen_rotri_i32(tmp, tmp, shift * 8);
  9132. op = (insn >> 20) & 7;
  9133. switch (op) {
  9134. case 0: gen_sxth(tmp); break;
  9135. case 1: gen_uxth(tmp); break;
  9136. case 2: gen_sxtb16(tmp); break;
  9137. case 3: gen_uxtb16(tmp); break;
  9138. case 4: gen_sxtb(tmp); break;
  9139. case 5: gen_uxtb(tmp); break;
  9140. default:
  9141. g_assert_not_reached();
  9142. }
  9143. if (rn != 15) {
  9144. tmp2 = load_reg(s, rn);
  9145. if ((op >> 1) == 1) {
  9146. gen_add16(tmp, tmp2);
  9147. } else {
  9148. tcg_gen_add_i32(tmp, tmp, tmp2);
  9149. tcg_temp_free_i32(tmp2);
  9150. }
  9151. }
  9152. store_reg(s, rd, tmp);
  9153. break;
  9154. case 2: /* SIMD add/subtract. */
  9155. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9156. goto illegal_op;
  9157. }
  9158. op = (insn >> 20) & 7;
  9159. shift = (insn >> 4) & 7;
  9160. if ((op & 3) == 3 || (shift & 3) == 3)
  9161. goto illegal_op;
  9162. tmp = load_reg(s, rn);
  9163. tmp2 = load_reg(s, rm);
  9164. gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
  9165. tcg_temp_free_i32(tmp2);
  9166. store_reg(s, rd, tmp);
  9167. break;
  9168. case 3: /* Other data processing. */
  9169. op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
  9170. if (op < 4) {
  9171. /* Saturating add/subtract. */
  9172. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9173. goto illegal_op;
  9174. }
  9175. tmp = load_reg(s, rn);
  9176. tmp2 = load_reg(s, rm);
  9177. if (op & 1)
  9178. gen_helper_double_saturate(tmp, cpu_env, tmp);
  9179. if (op & 2)
  9180. gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
  9181. else
  9182. gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
  9183. tcg_temp_free_i32(tmp2);
  9184. } else {
  9185. switch (op) {
  9186. case 0x0a: /* rbit */
  9187. case 0x08: /* rev */
  9188. case 0x09: /* rev16 */
  9189. case 0x0b: /* revsh */
  9190. case 0x18: /* clz */
  9191. break;
  9192. case 0x10: /* sel */
  9193. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9194. goto illegal_op;
  9195. }
  9196. break;
  9197. case 0x20: /* crc32/crc32c */
  9198. case 0x21:
  9199. case 0x22:
  9200. case 0x28:
  9201. case 0x29:
  9202. case 0x2a:
  9203. if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
  9204. goto illegal_op;
  9205. }
  9206. break;
  9207. default:
  9208. goto illegal_op;
  9209. }
  9210. tmp = load_reg(s, rn);
  9211. switch (op) {
  9212. case 0x0a: /* rbit */
  9213. gen_helper_rbit(tmp, tmp);
  9214. break;
  9215. case 0x08: /* rev */
  9216. tcg_gen_bswap32_i32(tmp, tmp);
  9217. break;
  9218. case 0x09: /* rev16 */
  9219. gen_rev16(tmp);
  9220. break;
  9221. case 0x0b: /* revsh */
  9222. gen_revsh(tmp);
  9223. break;
  9224. case 0x10: /* sel */
  9225. tmp2 = load_reg(s, rm);
  9226. tmp3 = tcg_temp_new_i32();
  9227. tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
  9228. gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
  9229. tcg_temp_free_i32(tmp3);
  9230. tcg_temp_free_i32(tmp2);
  9231. break;
  9232. case 0x18: /* clz */
  9233. gen_helper_clz(tmp, tmp);
  9234. break;
  9235. case 0x20:
  9236. case 0x21:
  9237. case 0x22:
  9238. case 0x28:
  9239. case 0x29:
  9240. case 0x2a:
  9241. {
  9242. /* crc32/crc32c */
  9243. uint32_t sz = op & 0x3;
  9244. uint32_t c = op & 0x8;
  9245. tmp2 = load_reg(s, rm);
  9246. if (sz == 0) {
  9247. tcg_gen_andi_i32(tmp2, tmp2, 0xff);
  9248. } else if (sz == 1) {
  9249. tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
  9250. }
  9251. tmp3 = tcg_const_i32(1 << sz);
  9252. if (c) {
  9253. gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
  9254. } else {
  9255. gen_helper_crc32(tmp, tmp, tmp2, tmp3);
  9256. }
  9257. tcg_temp_free_i32(tmp2);
  9258. tcg_temp_free_i32(tmp3);
  9259. break;
  9260. }
  9261. default:
  9262. g_assert_not_reached();
  9263. }
  9264. }
  9265. store_reg(s, rd, tmp);
  9266. break;
  9267. case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
  9268. switch ((insn >> 20) & 7) {
  9269. case 0: /* 32 x 32 -> 32 */
  9270. case 7: /* Unsigned sum of absolute differences. */
  9271. break;
  9272. case 1: /* 16 x 16 -> 32 */
  9273. case 2: /* Dual multiply add. */
  9274. case 3: /* 32 * 16 -> 32msb */
  9275. case 4: /* Dual multiply subtract. */
  9276. case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
  9277. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9278. goto illegal_op;
  9279. }
  9280. break;
  9281. }
  9282. op = (insn >> 4) & 0xf;
  9283. tmp = load_reg(s, rn);
  9284. tmp2 = load_reg(s, rm);
  9285. switch ((insn >> 20) & 7) {
  9286. case 0: /* 32 x 32 -> 32 */
  9287. tcg_gen_mul_i32(tmp, tmp, tmp2);
  9288. tcg_temp_free_i32(tmp2);
  9289. if (rs != 15) {
  9290. tmp2 = load_reg(s, rs);
  9291. if (op)
  9292. tcg_gen_sub_i32(tmp, tmp2, tmp);
  9293. else
  9294. tcg_gen_add_i32(tmp, tmp, tmp2);
  9295. tcg_temp_free_i32(tmp2);
  9296. }
  9297. break;
  9298. case 1: /* 16 x 16 -> 32 */
  9299. gen_mulxy(tmp, tmp2, op & 2, op & 1);
  9300. tcg_temp_free_i32(tmp2);
  9301. if (rs != 15) {
  9302. tmp2 = load_reg(s, rs);
  9303. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  9304. tcg_temp_free_i32(tmp2);
  9305. }
  9306. break;
  9307. case 2: /* Dual multiply add. */
  9308. case 4: /* Dual multiply subtract. */
  9309. if (op)
  9310. gen_swap_half(tmp2);
  9311. gen_smul_dual(tmp, tmp2);
  9312. if (insn & (1 << 22)) {
  9313. /* This subtraction cannot overflow. */
  9314. tcg_gen_sub_i32(tmp, tmp, tmp2);
  9315. } else {
  9316. /* This addition cannot overflow 32 bits;
  9317. * however it may overflow considered as a signed
  9318. * operation, in which case we must set the Q flag.
  9319. */
  9320. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  9321. }
  9322. tcg_temp_free_i32(tmp2);
  9323. if (rs != 15)
  9324. {
  9325. tmp2 = load_reg(s, rs);
  9326. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  9327. tcg_temp_free_i32(tmp2);
  9328. }
  9329. break;
  9330. case 3: /* 32 * 16 -> 32msb */
  9331. if (op)
  9332. tcg_gen_sari_i32(tmp2, tmp2, 16);
  9333. else
  9334. gen_sxth(tmp2);
  9335. tmp64 = gen_muls_i64_i32(tmp, tmp2);
  9336. tcg_gen_shri_i64(tmp64, tmp64, 16);
  9337. tmp = tcg_temp_new_i32();
  9338. tcg_gen_extrl_i64_i32(tmp, tmp64);
  9339. tcg_temp_free_i64(tmp64);
  9340. if (rs != 15)
  9341. {
  9342. tmp2 = load_reg(s, rs);
  9343. gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
  9344. tcg_temp_free_i32(tmp2);
  9345. }
  9346. break;
  9347. case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
  9348. tmp64 = gen_muls_i64_i32(tmp, tmp2);
  9349. if (rs != 15) {
  9350. tmp = load_reg(s, rs);
  9351. if (insn & (1 << 20)) {
  9352. tmp64 = gen_addq_msw(tmp64, tmp);
  9353. } else {
  9354. tmp64 = gen_subq_msw(tmp64, tmp);
  9355. }
  9356. }
  9357. if (insn & (1 << 4)) {
  9358. tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
  9359. }
  9360. tcg_gen_shri_i64(tmp64, tmp64, 32);
  9361. tmp = tcg_temp_new_i32();
  9362. tcg_gen_extrl_i64_i32(tmp, tmp64);
  9363. tcg_temp_free_i64(tmp64);
  9364. break;
  9365. case 7: /* Unsigned sum of absolute differences. */
  9366. gen_helper_usad8(tmp, tmp, tmp2);
  9367. tcg_temp_free_i32(tmp2);
  9368. if (rs != 15) {
  9369. tmp2 = load_reg(s, rs);
  9370. tcg_gen_add_i32(tmp, tmp, tmp2);
  9371. tcg_temp_free_i32(tmp2);
  9372. }
  9373. break;
  9374. }
  9375. store_reg(s, rd, tmp);
  9376. break;
  9377. case 6: case 7: /* 64-bit multiply, Divide. */
  9378. op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
  9379. tmp = load_reg(s, rn);
  9380. tmp2 = load_reg(s, rm);
  9381. if ((op & 0x50) == 0x10) {
  9382. /* sdiv, udiv */
  9383. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
  9384. goto illegal_op;
  9385. }
  9386. if (op & 0x20)
  9387. gen_helper_udiv(tmp, tmp, tmp2);
  9388. else
  9389. gen_helper_sdiv(tmp, tmp, tmp2);
  9390. tcg_temp_free_i32(tmp2);
  9391. store_reg(s, rd, tmp);
  9392. } else if ((op & 0xe) == 0xc) {
  9393. /* Dual multiply accumulate long. */
  9394. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9395. tcg_temp_free_i32(tmp);
  9396. tcg_temp_free_i32(tmp2);
  9397. goto illegal_op;
  9398. }
  9399. if (op & 1)
  9400. gen_swap_half(tmp2);
  9401. gen_smul_dual(tmp, tmp2);
  9402. if (op & 0x10) {
  9403. tcg_gen_sub_i32(tmp, tmp, tmp2);
  9404. } else {
  9405. tcg_gen_add_i32(tmp, tmp, tmp2);
  9406. }
  9407. tcg_temp_free_i32(tmp2);
  9408. /* BUGFIX */
  9409. tmp64 = tcg_temp_new_i64();
  9410. tcg_gen_ext_i32_i64(tmp64, tmp);
  9411. tcg_temp_free_i32(tmp);
  9412. gen_addq(s, tmp64, rs, rd);
  9413. gen_storeq_reg(s, rs, rd, tmp64);
  9414. tcg_temp_free_i64(tmp64);
  9415. } else {
  9416. if (op & 0x20) {
  9417. /* Unsigned 64-bit multiply */
  9418. tmp64 = gen_mulu_i64_i32(tmp, tmp2);
  9419. } else {
  9420. if (op & 8) {
  9421. /* smlalxy */
  9422. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9423. tcg_temp_free_i32(tmp2);
  9424. tcg_temp_free_i32(tmp);
  9425. goto illegal_op;
  9426. }
  9427. gen_mulxy(tmp, tmp2, op & 2, op & 1);
  9428. tcg_temp_free_i32(tmp2);
  9429. tmp64 = tcg_temp_new_i64();
  9430. tcg_gen_ext_i32_i64(tmp64, tmp);
  9431. tcg_temp_free_i32(tmp);
  9432. } else {
  9433. /* Signed 64-bit multiply */
  9434. tmp64 = gen_muls_i64_i32(tmp, tmp2);
  9435. }
  9436. }
  9437. if (op & 4) {
  9438. /* umaal */
  9439. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9440. tcg_temp_free_i64(tmp64);
  9441. goto illegal_op;
  9442. }
  9443. gen_addq_lo(s, tmp64, rs);
  9444. gen_addq_lo(s, tmp64, rd);
  9445. } else if (op & 0x40) {
  9446. /* 64-bit accumulate. */
  9447. gen_addq(s, tmp64, rs, rd);
  9448. }
  9449. gen_storeq_reg(s, rs, rd, tmp64);
  9450. tcg_temp_free_i64(tmp64);
  9451. }
  9452. break;
  9453. }
  9454. break;
  9455. case 6: case 7: case 14: case 15:
  9456. /* Coprocessor. */
  9457. if (((insn >> 24) & 3) == 3) {
  9458. /* Translate into the equivalent ARM encoding. */
  9459. insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
  9460. if (disas_neon_data_insn(s, insn)) {
  9461. goto illegal_op;
  9462. }
  9463. } else if (((insn >> 8) & 0xe) == 10) {
  9464. if (disas_vfp_insn(s, insn)) {
  9465. goto illegal_op;
  9466. }
  9467. } else {
  9468. if (insn & (1 << 28))
  9469. goto illegal_op;
  9470. if (disas_coproc_insn(s, insn)) {
  9471. goto illegal_op;
  9472. }
  9473. }
  9474. break;
  9475. case 8: case 9: case 10: case 11:
  9476. if (insn & (1 << 15)) {
  9477. /* Branches, misc control. */
  9478. if (insn & 0x5000) {
  9479. /* Unconditional branch. */
  9480. /* signextend(hw1[10:0]) -> offset[:12]. */
  9481. offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
  9482. /* hw1[10:0] -> offset[11:1]. */
  9483. offset |= (insn & 0x7ff) << 1;
  9484. /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
  9485. offset[24:22] already have the same value because of the
  9486. sign extension above. */
  9487. offset ^= ((~insn) & (1 << 13)) << 10;
  9488. offset ^= ((~insn) & (1 << 11)) << 11;
  9489. if (insn & (1 << 14)) {
  9490. /* Branch and link. */
  9491. tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
  9492. }
  9493. offset += s->pc;
  9494. if (insn & (1 << 12)) {
  9495. /* b/bl */
  9496. gen_jmp(s, offset);
  9497. } else {
  9498. /* blx */
  9499. offset &= ~(uint32_t)2;
  9500. /* thumb2 bx, no need to check */
  9501. gen_bx_im(s, offset);
  9502. }
  9503. } else if (((insn >> 23) & 7) == 7) {
  9504. /* Misc control */
  9505. if (insn & (1 << 13))
  9506. goto illegal_op;
  9507. if (insn & (1 << 26)) {
  9508. if (!(insn & (1 << 20))) {
  9509. /* Hypervisor call (v7) */
  9510. int imm16 = extract32(insn, 16, 4) << 12
  9511. | extract32(insn, 0, 12);
  9512. ARCH(7);
  9513. if (IS_USER(s)) {
  9514. goto illegal_op;
  9515. }
  9516. gen_hvc(s, imm16);
  9517. } else {
  9518. /* Secure monitor call (v6+) */
  9519. ARCH(6K);
  9520. if (IS_USER(s)) {
  9521. goto illegal_op;
  9522. }
  9523. gen_smc(s);
  9524. }
  9525. } else {
  9526. op = (insn >> 20) & 7;
  9527. switch (op) {
  9528. case 0: /* msr cpsr. */
  9529. if (arm_dc_feature(s, ARM_FEATURE_M)) {
  9530. tmp = load_reg(s, rn);
  9531. addr = tcg_const_i32(insn & 0xff);
  9532. gen_helper_v7m_msr(cpu_env, addr, tmp);
  9533. tcg_temp_free_i32(addr);
  9534. tcg_temp_free_i32(tmp);
  9535. gen_lookup_tb(s);
  9536. break;
  9537. }
  9538. /* fall through */
  9539. case 1: /* msr spsr. */
  9540. if (arm_dc_feature(s, ARM_FEATURE_M)) {
  9541. goto illegal_op;
  9542. }
  9543. tmp = load_reg(s, rn);
  9544. if (gen_set_psr(s,
  9545. msr_mask(s, (insn >> 8) & 0xf, op == 1),
  9546. op == 1, tmp))
  9547. goto illegal_op;
  9548. break;
  9549. case 2: /* cps, nop-hint. */
  9550. if (((insn >> 8) & 7) == 0) {
  9551. gen_nop_hint(s, insn & 0xff);
  9552. }
  9553. /* Implemented as NOP in user mode. */
  9554. if (IS_USER(s))
  9555. break;
  9556. offset = 0;
  9557. imm = 0;
  9558. if (insn & (1 << 10)) {
  9559. if (insn & (1 << 7))
  9560. offset |= CPSR_A;
  9561. if (insn & (1 << 6))
  9562. offset |= CPSR_I;
  9563. if (insn & (1 << 5))
  9564. offset |= CPSR_F;
  9565. if (insn & (1 << 9))
  9566. imm = CPSR_A | CPSR_I | CPSR_F;
  9567. }
  9568. if (insn & (1 << 8)) {
  9569. offset |= 0x1f;
  9570. imm |= (insn & 0x1f);
  9571. }
  9572. if (offset) {
  9573. gen_set_psr_im(s, offset, 0, imm);
  9574. }
  9575. break;
  9576. case 3: /* Special control operations. */
  9577. ARCH(7);
  9578. op = (insn >> 4) & 0xf;
  9579. switch (op) {
  9580. case 2: /* clrex */
  9581. gen_clrex(s);
  9582. break;
  9583. case 4: /* dsb */
  9584. case 5: /* dmb */
  9585. case 6: /* isb */
  9586. /* These execute as NOPs. */
  9587. break;
  9588. default:
  9589. goto illegal_op;
  9590. }
  9591. break;
  9592. case 4: /* bxj */
  9593. /* Trivial implementation equivalent to bx. */
  9594. tmp = load_reg(s, rn);
  9595. gen_bx(s, tmp);
  9596. break;
  9597. case 5: /* Exception return. */
  9598. if (IS_USER(s)) {
  9599. goto illegal_op;
  9600. }
  9601. if (rn != 14 || rd != 15) {
  9602. goto illegal_op;
  9603. }
  9604. tmp = load_reg(s, rn);
  9605. tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
  9606. gen_exception_return(s, tmp);
  9607. break;
  9608. case 6: /* mrs cpsr. */
  9609. tmp = tcg_temp_new_i32();
  9610. if (arm_dc_feature(s, ARM_FEATURE_M)) {
  9611. addr = tcg_const_i32(insn & 0xff);
  9612. gen_helper_v7m_mrs(tmp, cpu_env, addr);
  9613. tcg_temp_free_i32(addr);
  9614. } else {
  9615. gen_helper_cpsr_read(tmp, cpu_env);
  9616. }
  9617. store_reg(s, rd, tmp);
  9618. break;
  9619. case 7: /* mrs spsr. */
  9620. /* Not accessible in user mode. */
  9621. if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
  9622. goto illegal_op;
  9623. }
  9624. tmp = load_cpu_field(spsr);
  9625. store_reg(s, rd, tmp);
  9626. break;
  9627. }
  9628. }
  9629. } else {
  9630. /* Conditional branch. */
  9631. op = (insn >> 22) & 0xf;
  9632. /* Generate a conditional jump to next instruction. */
  9633. s->condlabel = gen_new_label();
  9634. arm_gen_test_cc(op ^ 1, s->condlabel);
  9635. s->condjmp = 1;
  9636. /* offset[11:1] = insn[10:0] */
  9637. offset = (insn & 0x7ff) << 1;
  9638. /* offset[17:12] = insn[21:16]. */
  9639. offset |= (insn & 0x003f0000) >> 4;
  9640. /* offset[31:20] = insn[26]. */
  9641. offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
  9642. /* offset[18] = insn[13]. */
  9643. offset |= (insn & (1 << 13)) << 5;
  9644. /* offset[19] = insn[11]. */
  9645. offset |= (insn & (1 << 11)) << 8;
  9646. /* jump to the offset */
  9647. gen_jmp(s, s->pc + offset);
  9648. }
  9649. } else {
  9650. /* Data processing immediate. */
  9651. if (insn & (1 << 25)) {
  9652. if (insn & (1 << 24)) {
  9653. if (insn & (1 << 20))
  9654. goto illegal_op;
  9655. /* Bitfield/Saturate. */
  9656. op = (insn >> 21) & 7;
  9657. imm = insn & 0x1f;
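/* shift is the lsb/shift amount, assembled from imm3:imm2
   (insn[14:12] and insn[7:6]). */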
  9658. shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
  9659. if (rn == 15) {
  9660. tmp = tcg_temp_new_i32();
  9661. tcg_gen_movi_i32(tmp, 0);
  9662. } else {
  9663. tmp = load_reg(s, rn);
  9664. }
  9665. switch (op) {
  9666. case 2: /* Signed bitfield extract. */
  9667. imm++;
  9668. if (shift + imm > 32)
  9669. goto illegal_op;
  9670. if (imm < 32)
  9671. gen_sbfx(tmp, shift, imm);
  9672. break;
  9673. case 6: /* Unsigned bitfield extract. */
  9674. imm++;
  9675. if (shift + imm > 32)
  9676. goto illegal_op;
  9677. if (imm < 32)
  9678. gen_ubfx(tmp, shift, (1u << imm) - 1);
  9679. break;
  9680. case 3: /* Bitfield insert/clear. */
  9681. if (imm < shift)
  9682. goto illegal_op;
  9683. imm = imm + 1 - shift;
  9684. if (imm != 32) {
  9685. tmp2 = load_reg(s, rd);
  9686. tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
  9687. tcg_temp_free_i32(tmp2);
  9688. }
  9689. break;
  9690. case 7:
  9691. goto illegal_op;
  9692. default: /* Saturate. */
  9693. if (shift) {
  9694. if (op & 1)
  9695. tcg_gen_sari_i32(tmp, tmp, shift);
  9696. else
  9697. tcg_gen_shli_i32(tmp, tmp, shift);
  9698. }
  9699. tmp2 = tcg_const_i32(imm);
  9700. if (op & 4) {
  9701. /* Unsigned. */
  9702. if ((op & 1) && shift == 0) {
  9703. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9704. tcg_temp_free_i32(tmp);
  9705. tcg_temp_free_i32(tmp2);
  9706. goto illegal_op;
  9707. }
  9708. gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
  9709. } else {
  9710. gen_helper_usat(tmp, cpu_env, tmp, tmp2);
  9711. }
  9712. } else {
  9713. /* Signed. */
  9714. if ((op & 1) && shift == 0) {
  9715. if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
  9716. tcg_temp_free_i32(tmp);
  9717. tcg_temp_free_i32(tmp2);
  9718. goto illegal_op;
  9719. }
  9720. gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
  9721. } else {
  9722. gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
  9723. }
  9724. }
  9725. tcg_temp_free_i32(tmp2);
  9726. break;
  9727. }
  9728. store_reg(s, rd, tmp);
  9729. } else {
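/* i:imm3:imm8 (insn[26], insn[14:12], insn[7:0]) form imm[11:0];
   the movw/movt case ORs in imm4 below. */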
  9730. imm = ((insn & 0x04000000) >> 15)
  9731. | ((insn & 0x7000) >> 4) | (insn & 0xff);
  9732. if (insn & (1 << 22)) {
  9733. /* 16-bit immediate. */
  9734. imm |= (insn >> 4) & 0xf000;
  9735. if (insn & (1 << 23)) {
  9736. /* movt */
  9737. tmp = load_reg(s, rd);
  9738. tcg_gen_ext16u_i32(tmp, tmp);
  9739. tcg_gen_ori_i32(tmp, tmp, imm << 16);
  9740. } else {
  9741. /* movw */
  9742. tmp = tcg_temp_new_i32();
  9743. tcg_gen_movi_i32(tmp, imm);
  9744. }
  9745. } else {
  9746. /* Add/sub 12-bit immediate. */
  9747. if (rn == 15) {
  9748. offset = s->pc & ~(uint32_t)3;
  9749. if (insn & (1 << 23))
  9750. offset -= imm;
  9751. else
  9752. offset += imm;
  9753. tmp = tcg_temp_new_i32();
  9754. tcg_gen_movi_i32(tmp, offset);
  9755. } else {
  9756. tmp = load_reg(s, rn);
  9757. if (insn & (1 << 23))
  9758. tcg_gen_subi_i32(tmp, tmp, imm);
  9759. else
  9760. tcg_gen_addi_i32(tmp, tmp, imm);
  9761. }
  9762. }
  9763. store_reg(s, rd, tmp);
  9764. }
  9765. } else {
  9766. int shifter_out = 0;
  9767. /* modified 12-bit immediate. */
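/* shift holds i:imm3 (insn[26], insn[14:12]): values 0-3 select a byte
   replication pattern, anything larger encodes a rotated constant. */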
  9768. shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
  9769. imm = (insn & 0xff);
  9770. switch (shift) {
  9771. case 0: /* XY */
  9772. /* Nothing to do. */
  9773. break;
  9774. case 1: /* 00XY00XY */
  9775. imm |= imm << 16;
  9776. break;
  9777. case 2: /* XY00XY00 */
  9778. imm |= imm << 16;
  9779. imm <<= 8;
  9780. break;
  9781. case 3: /* XYXYXYXY */
  9782. imm |= imm << 16;
  9783. imm |= imm << 8;
  9784. break;
  9785. default: /* Rotated constant. */
  9786. shift = (shift << 1) | (imm >> 7);
  9787. imm |= 0x80;
  9788. imm = imm << (32 - shift);
  9789. shifter_out = 1;
  9790. break;
  9791. }
  9792. tmp2 = tcg_temp_new_i32();
  9793. tcg_gen_movi_i32(tmp2, imm);
  9794. rn = (insn >> 16) & 0xf;
  9795. if (rn == 15) {
  9796. tmp = tcg_temp_new_i32();
  9797. tcg_gen_movi_i32(tmp, 0);
  9798. } else {
  9799. tmp = load_reg(s, rn);
  9800. }
  9801. op = (insn >> 21) & 0xf;
  9802. if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
  9803. shifter_out, tmp, tmp2))
  9804. goto illegal_op;
  9805. tcg_temp_free_i32(tmp2);
  9806. rd = (insn >> 8) & 0xf;
  9807. if (rd != 15) {
  9808. store_reg(s, rd, tmp);
  9809. } else {
  9810. tcg_temp_free_i32(tmp);
  9811. }
  9812. }
  9813. }
  9814. break;
  9815. case 12: /* Load/store single data item. */
  9816. {
  9817. int postinc = 0;
  9818. int writeback = 0;
  9819. int memidx;
  9820. if ((insn & 0x01100000) == 0x01000000) {
  9821. if (disas_neon_ls_insn(s, insn)) {
  9822. goto illegal_op;
  9823. }
  9824. break;
  9825. }
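/* op[1:0] = access size (insn[22:21]), op[2] = sign-extend flag (insn[24]). */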
  9826. op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
  9827. if (rs == 15) {
  9828. if (!(insn & (1 << 20))) {
  9829. goto illegal_op;
  9830. }
  9831. if (op != 2) {
  9832. /* Byte or halfword load space with dest == r15 : memory hints.
  9833. * Catch them early so we don't emit pointless addressing code.
  9834. * This space is a mix of:
  9835. * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
  9836. * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
  9837. * cores)
  9838. * unallocated hints, which must be treated as NOPs
  9839. * UNPREDICTABLE space, which we NOP or UNDEF depending on
  9840. * which is easiest for the decoding logic
  9841. * Some space which must UNDEF
  9842. */
  9843. int op1 = (insn >> 23) & 3;
  9844. int op2 = (insn >> 6) & 0x3f;
  9845. if (op & 2) {
  9846. goto illegal_op;
  9847. }
  9848. if (rn == 15) {
  9849. /* UNPREDICTABLE, unallocated hint or
  9850. * PLD/PLDW/PLI (literal)
  9851. */
  9852. return 0;
  9853. }
  9854. if (op1 & 1) {
  9855. return 0; /* PLD/PLDW/PLI or unallocated hint */
  9856. }
  9857. if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
  9858. return 0; /* PLD/PLDW/PLI or unallocated hint */
  9859. }
  9860. /* UNDEF space, or an UNPREDICTABLE */
  9861. return 1;
  9862. }
  9863. }
  9864. memidx = get_mem_index(s);
  9865. if (rn == 15) {
  9866. addr = tcg_temp_new_i32();
  9867. /* PC relative. */
  9868. /* s->pc has already been incremented by 4. */
  9869. imm = s->pc & 0xfffffffc;
  9870. if (insn & (1 << 23))
  9871. imm += insn & 0xfff;
  9872. else
  9873. imm -= insn & 0xfff;
  9874. tcg_gen_movi_i32(addr, imm);
  9875. } else {
  9876. addr = load_reg(s, rn);
  9877. if (insn & (1 << 23)) {
  9878. /* Positive offset. */
  9879. imm = insn & 0xfff;
  9880. tcg_gen_addi_i32(addr, addr, imm);
  9881. } else {
  9882. imm = insn & 0xff;
  9883. switch ((insn >> 8) & 0xf) {
  9884. case 0x0: /* Shifted Register. */
  9885. shift = (insn >> 4) & 0xf;
  9886. if (shift > 3) {
  9887. tcg_temp_free_i32(addr);
  9888. goto illegal_op;
  9889. }
  9890. tmp = load_reg(s, rm);
  9891. if (shift)
  9892. tcg_gen_shli_i32(tmp, tmp, shift);
  9893. tcg_gen_add_i32(addr, addr, tmp);
  9894. tcg_temp_free_i32(tmp);
  9895. break;
  9896. case 0xc: /* Negative offset. */
  9897. tcg_gen_addi_i32(addr, addr, -imm);
  9898. break;
  9899. case 0xe: /* User privilege. */
  9900. tcg_gen_addi_i32(addr, addr, imm);
  9901. memidx = get_a32_user_mem_index(s);
  9902. break;
  9903. case 0x9: /* Post-decrement. */
  9904. imm = -imm;
  9905. /* Fall through. */
  9906. case 0xb: /* Post-increment. */
  9907. postinc = 1;
  9908. writeback = 1;
  9909. break;
  9910. case 0xd: /* Pre-decrement. */
  9911. imm = -imm;
  9912. /* Fall through. */
  9913. case 0xf: /* Pre-increment. */
  9914. tcg_gen_addi_i32(addr, addr, imm);
  9915. writeback = 1;
  9916. break;
  9917. default:
  9918. tcg_temp_free_i32(addr);
  9919. goto illegal_op;
  9920. }
  9921. }
  9922. }
  9923. if (insn & (1 << 20)) {
  9924. /* Load. */
  9925. tmp = tcg_temp_new_i32();
  9926. switch (op) {
  9927. case 0:
  9928. gen_aa32_ld8u(tmp, addr, memidx);
  9929. break;
  9930. case 4:
  9931. gen_aa32_ld8s(tmp, addr, memidx);
  9932. break;
  9933. case 1:
  9934. gen_aa32_ld16u(tmp, addr, memidx);
  9935. break;
  9936. case 5:
  9937. gen_aa32_ld16s(tmp, addr, memidx);
  9938. break;
  9939. case 2:
  9940. gen_aa32_ld32u(tmp, addr, memidx);
  9941. break;
  9942. default:
  9943. tcg_temp_free_i32(tmp);
  9944. tcg_temp_free_i32(addr);
  9945. goto illegal_op;
  9946. }
  9947. if (rs == 15) {
  9948. gen_bx(s, tmp);
  9949. } else {
  9950. store_reg(s, rs, tmp);
  9951. }
  9952. } else {
  9953. /* Store. */
  9954. tmp = load_reg(s, rs);
  9955. switch (op) {
  9956. case 0:
  9957. gen_aa32_st8(tmp, addr, memidx);
  9958. break;
  9959. case 1:
  9960. gen_aa32_st16(tmp, addr, memidx);
  9961. break;
  9962. case 2:
  9963. gen_aa32_st32(tmp, addr, memidx);
  9964. break;
  9965. default:
  9966. tcg_temp_free_i32(tmp);
  9967. tcg_temp_free_i32(addr);
  9968. goto illegal_op;
  9969. }
  9970. tcg_temp_free_i32(tmp);
  9971. }
  9972. if (postinc)
  9973. tcg_gen_addi_i32(addr, addr, imm);
  9974. if (writeback) {
  9975. store_reg(s, rn, addr);
  9976. } else {
  9977. tcg_temp_free_i32(addr);
  9978. }
  9979. }
  9980. break;
  9981. default:
  9982. goto illegal_op;
  9983. }
  9984. return 0;
  9985. illegal_op:
  9986. return 1;
  9987. }
  9988. static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
  9989. {
  9990. uint32_t val, insn, op, rm, rn, rd, shift, cond;
  9991. int32_t offset;
  9992. int i;
  9993. TCGv_i32 tmp;
  9994. TCGv_i32 tmp2;
  9995. TCGv_i32 addr;
  9996. if (s->condexec_mask) {
  9997. cond = s->condexec_cond;
  9998. if (cond != 0x0e) { /* Skip conditional when condition is AL. */
  9999. s->condlabel = gen_new_label();
  10000. arm_gen_test_cc(cond ^ 1, s->condlabel);
  10001. s->condjmp = 1;
  10002. }
  10003. }
  10004. insn = arm_lduw_code(env, s->pc, s->bswap_code);
  10005. s->pc += 2;
  10006. switch (insn >> 12) {
  10007. case 0: case 1:
  10008. rd = insn & 7;
  10009. op = (insn >> 11) & 3;
  10010. if (op == 3) {
  10011. /* add/subtract */
  10012. rn = (insn >> 3) & 7;
  10013. tmp = load_reg(s, rn);
  10014. if (insn & (1 << 10)) {
  10015. /* immediate */
  10016. tmp2 = tcg_temp_new_i32();
  10017. tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
  10018. } else {
  10019. /* reg */
  10020. rm = (insn >> 6) & 7;
  10021. tmp2 = load_reg(s, rm);
  10022. }
  10023. if (insn & (1 << 9)) {
  10024. if (s->condexec_mask)
  10025. tcg_gen_sub_i32(tmp, tmp, tmp2);
  10026. else
  10027. gen_sub_CC(tmp, tmp, tmp2);
  10028. } else {
  10029. if (s->condexec_mask)
  10030. tcg_gen_add_i32(tmp, tmp, tmp2);
  10031. else
  10032. gen_add_CC(tmp, tmp, tmp2);
  10033. }
  10034. tcg_temp_free_i32(tmp2);
  10035. store_reg(s, rd, tmp);
  10036. } else {
  10037. /* shift immediate */
  10038. rm = (insn >> 3) & 7;
  10039. shift = (insn >> 6) & 0x1f;
  10040. tmp = load_reg(s, rm);
  10041. gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
  10042. if (!s->condexec_mask)
  10043. gen_logic_CC(tmp);
  10044. store_reg(s, rd, tmp);
  10045. }
  10046. break;
  10047. case 2: case 3:
  10048. /* arithmetic large immediate */
  10049. op = (insn >> 11) & 3;
  10050. rd = (insn >> 8) & 0x7;
  10051. if (op == 0) { /* mov */
  10052. tmp = tcg_temp_new_i32();
  10053. tcg_gen_movi_i32(tmp, insn & 0xff);
  10054. if (!s->condexec_mask)
  10055. gen_logic_CC(tmp);
  10056. store_reg(s, rd, tmp);
  10057. } else {
  10058. tmp = load_reg(s, rd);
  10059. tmp2 = tcg_temp_new_i32();
  10060. tcg_gen_movi_i32(tmp2, insn & 0xff);
  10061. switch (op) {
  10062. case 1: /* cmp */
  10063. gen_sub_CC(tmp, tmp, tmp2);
  10064. tcg_temp_free_i32(tmp);
  10065. tcg_temp_free_i32(tmp2);
  10066. break;
  10067. case 2: /* add */
  10068. if (s->condexec_mask)
  10069. tcg_gen_add_i32(tmp, tmp, tmp2);
  10070. else
  10071. gen_add_CC(tmp, tmp, tmp2);
  10072. tcg_temp_free_i32(tmp2);
  10073. store_reg(s, rd, tmp);
  10074. break;
  10075. case 3: /* sub */
  10076. if (s->condexec_mask)
  10077. tcg_gen_sub_i32(tmp, tmp, tmp2);
  10078. else
  10079. gen_sub_CC(tmp, tmp, tmp2);
  10080. tcg_temp_free_i32(tmp2);
  10081. store_reg(s, rd, tmp);
  10082. break;
  10083. }
  10084. }
  10085. break;
  10086. case 4:
  10087. if (insn & (1 << 11)) {
  10088. rd = (insn >> 8) & 7;
  10089. /* load pc-relative. Bit 1 of PC is ignored. */
  10090. val = s->pc + 2 + ((insn & 0xff) * 4);
  10091. val &= ~(uint32_t)2;
  10092. addr = tcg_temp_new_i32();
  10093. tcg_gen_movi_i32(addr, val);
  10094. tmp = tcg_temp_new_i32();
  10095. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10096. tcg_temp_free_i32(addr);
  10097. store_reg(s, rd, tmp);
  10098. break;
  10099. }
  10100. if (insn & (1 << 10)) {
  10101. /* data processing extended or blx */
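/* rd gets its top bit from insn[7], so these ops can address the
   high registers r8-r15. */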
  10102. rd = (insn & 7) | ((insn >> 4) & 8);
  10103. rm = (insn >> 3) & 0xf;
  10104. op = (insn >> 8) & 3;
  10105. switch (op) {
  10106. case 0: /* add */
  10107. tmp = load_reg(s, rd);
  10108. tmp2 = load_reg(s, rm);
  10109. tcg_gen_add_i32(tmp, tmp, tmp2);
  10110. tcg_temp_free_i32(tmp2);
  10111. store_reg(s, rd, tmp);
  10112. break;
  10113. case 1: /* cmp */
  10114. tmp = load_reg(s, rd);
  10115. tmp2 = load_reg(s, rm);
  10116. gen_sub_CC(tmp, tmp, tmp2);
  10117. tcg_temp_free_i32(tmp2);
  10118. tcg_temp_free_i32(tmp);
  10119. break;
  10120. case 2: /* mov/cpy */
  10121. tmp = load_reg(s, rm);
  10122. store_reg(s, rd, tmp);
  10123. break;
  10124. case 3:/* branch [and link] exchange thumb register */
  10125. tmp = load_reg(s, rm);
  10126. if (insn & (1 << 7)) {
  10127. ARCH(5);
  10128. val = (uint32_t)s->pc | 1;
  10129. tmp2 = tcg_temp_new_i32();
  10130. tcg_gen_movi_i32(tmp2, val);
  10131. store_reg(s, 14, tmp2);
  10132. }
  10133. /* already thumb, no need to check */
  10134. gen_bx(s, tmp);
  10135. break;
  10136. }
  10137. break;
  10138. }
  10139. /* data processing register */
  10140. rd = insn & 7;
  10141. rm = (insn >> 3) & 7;
  10142. op = (insn >> 6) & 0xf;
  10143. if (op == 2 || op == 3 || op == 4 || op == 7) {
  10144. /* the shift/rotate ops want the operands backwards */
  10145. val = rm;
  10146. rm = rd;
  10147. rd = val;
  10148. val = 1;
  10149. } else {
  10150. val = 0;
  10151. }
  10152. if (op == 9) { /* neg */
  10153. tmp = tcg_temp_new_i32();
  10154. tcg_gen_movi_i32(tmp, 0);
  10155. } else if (op != 0xf) { /* mvn doesn't read its first operand */
  10156. tmp = load_reg(s, rd);
  10157. } else {
  10158. TCGV_UNUSED_I32(tmp);
  10159. }
  10160. tmp2 = load_reg(s, rm);
  10161. switch (op) {
  10162. case 0x0: /* and */
  10163. tcg_gen_and_i32(tmp, tmp, tmp2);
  10164. if (!s->condexec_mask)
  10165. gen_logic_CC(tmp);
  10166. break;
  10167. case 0x1: /* eor */
  10168. tcg_gen_xor_i32(tmp, tmp, tmp2);
  10169. if (!s->condexec_mask)
  10170. gen_logic_CC(tmp);
  10171. break;
  10172. case 0x2: /* lsl */
  10173. if (s->condexec_mask) {
  10174. gen_shl(tmp2, tmp2, tmp);
  10175. } else {
  10176. gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
  10177. gen_logic_CC(tmp2);
  10178. }
  10179. break;
  10180. case 0x3: /* lsr */
  10181. if (s->condexec_mask) {
  10182. gen_shr(tmp2, tmp2, tmp);
  10183. } else {
  10184. gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
  10185. gen_logic_CC(tmp2);
  10186. }
  10187. break;
  10188. case 0x4: /* asr */
  10189. if (s->condexec_mask) {
  10190. gen_sar(tmp2, tmp2, tmp);
  10191. } else {
  10192. gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
  10193. gen_logic_CC(tmp2);
  10194. }
  10195. break;
  10196. case 0x5: /* adc */
  10197. if (s->condexec_mask) {
  10198. gen_adc(tmp, tmp2);
  10199. } else {
  10200. gen_adc_CC(tmp, tmp, tmp2);
  10201. }
  10202. break;
  10203. case 0x6: /* sbc */
  10204. if (s->condexec_mask) {
  10205. gen_sub_carry(tmp, tmp, tmp2);
  10206. } else {
  10207. gen_sbc_CC(tmp, tmp, tmp2);
  10208. }
  10209. break;
  10210. case 0x7: /* ror */
  10211. if (s->condexec_mask) {
  10212. tcg_gen_andi_i32(tmp, tmp, 0x1f);
  10213. tcg_gen_rotr_i32(tmp2, tmp2, tmp);
  10214. } else {
  10215. gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
  10216. gen_logic_CC(tmp2);
  10217. }
  10218. break;
  10219. case 0x8: /* tst */
  10220. tcg_gen_and_i32(tmp, tmp, tmp2);
  10221. gen_logic_CC(tmp);
  10222. rd = 16;
  10223. break;
  10224. case 0x9: /* neg */
  10225. if (s->condexec_mask)
  10226. tcg_gen_neg_i32(tmp, tmp2);
  10227. else
  10228. gen_sub_CC(tmp, tmp, tmp2);
  10229. break;
  10230. case 0xa: /* cmp */
  10231. gen_sub_CC(tmp, tmp, tmp2);
  10232. rd = 16;
  10233. break;
  10234. case 0xb: /* cmn */
  10235. gen_add_CC(tmp, tmp, tmp2);
  10236. rd = 16;
  10237. break;
  10238. case 0xc: /* orr */
  10239. tcg_gen_or_i32(tmp, tmp, tmp2);
  10240. if (!s->condexec_mask)
  10241. gen_logic_CC(tmp);
  10242. break;
  10243. case 0xd: /* mul */
  10244. tcg_gen_mul_i32(tmp, tmp, tmp2);
  10245. if (!s->condexec_mask)
  10246. gen_logic_CC(tmp);
  10247. break;
  10248. case 0xe: /* bic */
  10249. tcg_gen_andc_i32(tmp, tmp, tmp2);
  10250. if (!s->condexec_mask)
  10251. gen_logic_CC(tmp);
  10252. break;
  10253. case 0xf: /* mvn */
  10254. tcg_gen_not_i32(tmp2, tmp2);
  10255. if (!s->condexec_mask)
  10256. gen_logic_CC(tmp2);
  10257. val = 1;
  10258. rm = rd;
  10259. break;
  10260. }
  10261. if (rd != 16) {
  10262. if (val) {
  10263. store_reg(s, rm, tmp2);
  10264. if (op != 0xf)
  10265. tcg_temp_free_i32(tmp);
  10266. } else {
  10267. store_reg(s, rd, tmp);
  10268. tcg_temp_free_i32(tmp2);
  10269. }
  10270. } else {
  10271. tcg_temp_free_i32(tmp);
  10272. tcg_temp_free_i32(tmp2);
  10273. }
  10274. break;
  10275. case 5:
  10276. /* load/store register offset. */
  10277. rd = insn & 7;
  10278. rn = (insn >> 3) & 7;
  10279. rm = (insn >> 6) & 7;
  10280. op = (insn >> 9) & 7;
  10281. addr = load_reg(s, rn);
  10282. tmp = load_reg(s, rm);
  10283. tcg_gen_add_i32(addr, addr, tmp);
  10284. tcg_temp_free_i32(tmp);
  10285. if (op < 3) { /* store */
  10286. tmp = load_reg(s, rd);
  10287. } else {
  10288. tmp = tcg_temp_new_i32();
  10289. }
  10290. switch (op) {
  10291. case 0: /* str */
  10292. gen_aa32_st32(tmp, addr, get_mem_index(s));
  10293. break;
  10294. case 1: /* strh */
  10295. gen_aa32_st16(tmp, addr, get_mem_index(s));
  10296. break;
  10297. case 2: /* strb */
  10298. gen_aa32_st8(tmp, addr, get_mem_index(s));
  10299. break;
  10300. case 3: /* ldrsb */
  10301. gen_aa32_ld8s(tmp, addr, get_mem_index(s));
  10302. break;
  10303. case 4: /* ldr */
  10304. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10305. break;
  10306. case 5: /* ldrh */
  10307. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  10308. break;
  10309. case 6: /* ldrb */
  10310. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  10311. break;
  10312. case 7: /* ldrsh */
  10313. gen_aa32_ld16s(tmp, addr, get_mem_index(s));
  10314. break;
  10315. }
  10316. if (op >= 3) { /* load */
  10317. store_reg(s, rd, tmp);
  10318. } else {
  10319. tcg_temp_free_i32(tmp);
  10320. }
  10321. tcg_temp_free_i32(addr);
  10322. break;
  10323. case 6:
  10324. /* load/store word immediate offset */
  10325. rd = insn & 7;
  10326. rn = (insn >> 3) & 7;
  10327. addr = load_reg(s, rn);
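/* imm5 (insn[10:6]) scaled by 4 for word accesses. */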
  10328. val = (insn >> 4) & 0x7c;
  10329. tcg_gen_addi_i32(addr, addr, val);
  10330. if (insn & (1 << 11)) {
  10331. /* load */
  10332. tmp = tcg_temp_new_i32();
  10333. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10334. store_reg(s, rd, tmp);
  10335. } else {
  10336. /* store */
  10337. tmp = load_reg(s, rd);
  10338. gen_aa32_st32(tmp, addr, get_mem_index(s));
  10339. tcg_temp_free_i32(tmp);
  10340. }
  10341. tcg_temp_free_i32(addr);
  10342. break;
  10343. case 7:
  10344. /* load/store byte immediate offset */
  10345. rd = insn & 7;
  10346. rn = (insn >> 3) & 7;
  10347. addr = load_reg(s, rn);
  10348. val = (insn >> 6) & 0x1f;
  10349. tcg_gen_addi_i32(addr, addr, val);
  10350. if (insn & (1 << 11)) {
  10351. /* load */
  10352. tmp = tcg_temp_new_i32();
  10353. gen_aa32_ld8u(tmp, addr, get_mem_index(s));
  10354. store_reg(s, rd, tmp);
  10355. } else {
  10356. /* store */
  10357. tmp = load_reg(s, rd);
  10358. gen_aa32_st8(tmp, addr, get_mem_index(s));
  10359. tcg_temp_free_i32(tmp);
  10360. }
  10361. tcg_temp_free_i32(addr);
  10362. break;
  10363. case 8:
  10364. /* load/store halfword immediate offset */
  10365. rd = insn & 7;
  10366. rn = (insn >> 3) & 7;
  10367. addr = load_reg(s, rn);
  10368. val = (insn >> 5) & 0x3e;
  10369. tcg_gen_addi_i32(addr, addr, val);
  10370. if (insn & (1 << 11)) {
  10371. /* load */
  10372. tmp = tcg_temp_new_i32();
  10373. gen_aa32_ld16u(tmp, addr, get_mem_index(s));
  10374. store_reg(s, rd, tmp);
  10375. } else {
  10376. /* store */
  10377. tmp = load_reg(s, rd);
  10378. gen_aa32_st16(tmp, addr, get_mem_index(s));
  10379. tcg_temp_free_i32(tmp);
  10380. }
  10381. tcg_temp_free_i32(addr);
  10382. break;
  10383. case 9:
  10384. /* load/store from stack */
  10385. rd = (insn >> 8) & 7;
  10386. addr = load_reg(s, 13);
  10387. val = (insn & 0xff) * 4;
  10388. tcg_gen_addi_i32(addr, addr, val);
  10389. if (insn & (1 << 11)) {
  10390. /* load */
  10391. tmp = tcg_temp_new_i32();
  10392. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10393. store_reg(s, rd, tmp);
  10394. } else {
  10395. /* store */
  10396. tmp = load_reg(s, rd);
  10397. gen_aa32_st32(tmp, addr, get_mem_index(s));
  10398. tcg_temp_free_i32(tmp);
  10399. }
  10400. tcg_temp_free_i32(addr);
  10401. break;
  10402. case 10:
  10403. /* add to high reg */
  10404. rd = (insn >> 8) & 7;
  10405. if (insn & (1 << 11)) {
  10406. /* SP */
  10407. tmp = load_reg(s, 13);
  10408. } else {
  10409. /* PC. bit 1 is ignored. */
  10410. tmp = tcg_temp_new_i32();
  10411. tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
  10412. }
  10413. val = (insn & 0xff) * 4;
  10414. tcg_gen_addi_i32(tmp, tmp, val);
  10415. store_reg(s, rd, tmp);
  10416. break;
  10417. case 11:
  10418. /* misc */
  10419. op = (insn >> 8) & 0xf;
  10420. switch (op) {
  10421. case 0:
  10422. /* adjust stack pointer */
  10423. tmp = load_reg(s, 13);
  10424. val = (insn & 0x7f) * 4;
  10425. if (insn & (1 << 7))
  10426. val = -(int32_t)val;
  10427. tcg_gen_addi_i32(tmp, tmp, val);
  10428. store_reg(s, 13, tmp);
  10429. break;
  10430. case 2: /* sign/zero extend. */
  10431. ARCH(6);
  10432. rd = insn & 7;
  10433. rm = (insn >> 3) & 7;
  10434. tmp = load_reg(s, rm);
  10435. switch ((insn >> 6) & 3) {
  10436. case 0: gen_sxth(tmp); break;
  10437. case 1: gen_sxtb(tmp); break;
  10438. case 2: gen_uxth(tmp); break;
  10439. case 3: gen_uxtb(tmp); break;
  10440. }
  10441. store_reg(s, rd, tmp);
  10442. break;
  10443. case 4: case 5: case 0xc: case 0xd:
  10444. /* push/pop */
  10445. addr = load_reg(s, 13);
  10446. if (insn & (1 << 8))
  10447. offset = 4;
  10448. else
  10449. offset = 0;
  10450. for (i = 0; i < 8; i++) {
  10451. if (insn & (1 << i))
  10452. offset += 4;
  10453. }
  10454. if ((insn & (1 << 11)) == 0) {
  10455. tcg_gen_addi_i32(addr, addr, -offset);
  10456. }
  10457. for (i = 0; i < 8; i++) {
  10458. if (insn & (1 << i)) {
  10459. if (insn & (1 << 11)) {
  10460. /* pop */
  10461. tmp = tcg_temp_new_i32();
  10462. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10463. store_reg(s, i, tmp);
  10464. } else {
  10465. /* push */
  10466. tmp = load_reg(s, i);
  10467. gen_aa32_st32(tmp, addr, get_mem_index(s));
  10468. tcg_temp_free_i32(tmp);
  10469. }
  10470. /* advance to the next address. */
  10471. tcg_gen_addi_i32(addr, addr, 4);
  10472. }
  10473. }
  10474. TCGV_UNUSED_I32(tmp);
  10475. if (insn & (1 << 8)) {
  10476. if (insn & (1 << 11)) {
  10477. /* pop pc */
  10478. tmp = tcg_temp_new_i32();
  10479. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10480. /* don't set the pc until the rest of the instruction
  10481. has completed */
  10482. } else {
  10483. /* push lr */
  10484. tmp = load_reg(s, 14);
  10485. gen_aa32_st32(tmp, addr, get_mem_index(s));
  10486. tcg_temp_free_i32(tmp);
  10487. }
  10488. tcg_gen_addi_i32(addr, addr, 4);
  10489. }
  10490. if ((insn & (1 << 11)) == 0) {
  10491. tcg_gen_addi_i32(addr, addr, -offset);
  10492. }
  10493. /* write back the new stack pointer */
  10494. store_reg(s, 13, addr);
  10495. /* set the new PC value */
  10496. if ((insn & 0x0900) == 0x0900) {
  10497. store_reg_from_load(s, 15, tmp);
  10498. }
  10499. break;
10500. case 1: case 3: case 9: case 11: /* cbz/cbnz */
  10501. rm = insn & 7;
  10502. tmp = load_reg(s, rm);
  10503. s->condlabel = gen_new_label();
  10504. s->condjmp = 1;
  10505. if (insn & (1 << 11))
  10506. tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
  10507. else
  10508. tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
  10509. tcg_temp_free_i32(tmp);
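/* CB{N}Z offset: bits 5:1 from insn[7:3], bit 6 from insn[9]. */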
  10510. offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
  10511. val = (uint32_t)s->pc + 2;
  10512. val += offset;
  10513. gen_jmp(s, val);
  10514. break;
  10515. case 15: /* IT, nop-hint. */
  10516. if ((insn & 0xf) == 0) {
  10517. gen_nop_hint(s, (insn >> 4) & 0xf);
  10518. break;
  10519. }
  10520. /* If Then. */
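/* firstcond[3:1] goes into condexec_cond; firstcond[0] is kept as the top
   bit of the 5-bit mask and shifted into the condition as the block
   advances. */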
  10521. s->condexec_cond = (insn >> 4) & 0xe;
  10522. s->condexec_mask = insn & 0x1f;
  10523. /* No actual code generated for this insn, just setup state. */
  10524. break;
  10525. case 0xe: /* bkpt */
  10526. {
  10527. int imm8 = extract32(insn, 0, 8);
  10528. ARCH(5);
  10529. gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
  10530. default_exception_el(s));
  10531. break;
  10532. }
  10533. case 0xa: /* rev */
  10534. ARCH(6);
  10535. rn = (insn >> 3) & 0x7;
  10536. rd = insn & 0x7;
  10537. tmp = load_reg(s, rn);
  10538. switch ((insn >> 6) & 3) {
  10539. case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
  10540. case 1: gen_rev16(tmp); break;
  10541. case 3: gen_revsh(tmp); break;
  10542. default: goto illegal_op;
  10543. }
  10544. store_reg(s, rd, tmp);
  10545. break;
  10546. case 6:
  10547. switch ((insn >> 5) & 7) {
  10548. case 2:
  10549. /* setend */
  10550. ARCH(6);
  10551. if (((insn >> 3) & 1) != s->bswap_code) {
  10552. /* Dynamic endianness switching not implemented. */
  10553. qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
  10554. goto illegal_op;
  10555. }
  10556. break;
  10557. case 3:
  10558. /* cps */
  10559. ARCH(6);
  10560. if (IS_USER(s)) {
  10561. break;
  10562. }
  10563. if (arm_dc_feature(s, ARM_FEATURE_M)) {
  10564. tmp = tcg_const_i32((insn & (1 << 4)) != 0);
  10565. /* FAULTMASK */
  10566. if (insn & 1) {
  10567. addr = tcg_const_i32(19);
  10568. gen_helper_v7m_msr(cpu_env, addr, tmp);
  10569. tcg_temp_free_i32(addr);
  10570. }
  10571. /* PRIMASK */
  10572. if (insn & 2) {
  10573. addr = tcg_const_i32(16);
  10574. gen_helper_v7m_msr(cpu_env, addr, tmp);
  10575. tcg_temp_free_i32(addr);
  10576. }
  10577. tcg_temp_free_i32(tmp);
  10578. gen_lookup_tb(s);
  10579. } else {
  10580. if (insn & (1 << 4)) {
  10581. shift = CPSR_A | CPSR_I | CPSR_F;
  10582. } else {
  10583. shift = 0;
  10584. }
  10585. gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
  10586. }
  10587. break;
  10588. default:
  10589. goto undef;
  10590. }
  10591. break;
  10592. default:
  10593. goto undef;
  10594. }
  10595. break;
  10596. case 12:
  10597. {
  10598. /* load/store multiple */
  10599. TCGv_i32 loaded_var;
  10600. TCGV_UNUSED_I32(loaded_var);
  10601. rn = (insn >> 8) & 0x7;
  10602. addr = load_reg(s, rn);
  10603. for (i = 0; i < 8; i++) {
  10604. if (insn & (1 << i)) {
  10605. if (insn & (1 << 11)) {
  10606. /* load */
  10607. tmp = tcg_temp_new_i32();
  10608. gen_aa32_ld32u(tmp, addr, get_mem_index(s));
  10609. if (i == rn) {
  10610. loaded_var = tmp;
  10611. } else {
  10612. store_reg(s, i, tmp);
  10613. }
  10614. } else {
  10615. /* store */
  10616. tmp = load_reg(s, i);
  10617. gen_aa32_st32(tmp, addr, get_mem_index(s));
  10618. tcg_temp_free_i32(tmp);
  10619. }
  10620. /* advance to the next address */
  10621. tcg_gen_addi_i32(addr, addr, 4);
  10622. }
  10623. }
  10624. if ((insn & (1 << rn)) == 0) {
  10625. /* base reg not in list: base register writeback */
  10626. store_reg(s, rn, addr);
  10627. } else {
  10628. /* base reg in list: if load, complete it now */
  10629. if (insn & (1 << 11)) {
  10630. store_reg(s, rn, loaded_var);
  10631. }
  10632. tcg_temp_free_i32(addr);
  10633. }
  10634. break;
  10635. }
  10636. case 13:
  10637. /* conditional branch or swi */
  10638. cond = (insn >> 8) & 0xf;
  10639. if (cond == 0xe)
  10640. goto undef;
  10641. if (cond == 0xf) {
  10642. /* swi */
  10643. gen_set_pc_im(s, s->pc);
  10644. s->svc_imm = extract32(insn, 0, 8);
  10645. s->is_jmp = DISAS_SWI;
  10646. break;
  10647. }
  10648. /* generate a conditional jump to next instruction */
  10649. s->condlabel = gen_new_label();
  10650. arm_gen_test_cc(cond ^ 1, s->condlabel);
  10651. s->condjmp = 1;
  10652. /* jump to the offset */
  10653. val = (uint32_t)s->pc + 2;
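/* Sign-extend the 8-bit offset; it counts halfwords, hence the << 1 below. */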
  10654. offset = ((int32_t)insn << 24) >> 24;
  10655. val += offset << 1;
  10656. gen_jmp(s, val);
  10657. break;
  10658. case 14:
  10659. if (insn & (1 << 11)) {
  10660. if (disas_thumb2_insn(env, s, insn))
  10661. goto undef32;
  10662. break;
  10663. }
  10664. /* unconditional branch */
  10665. val = (uint32_t)s->pc;
  10666. offset = ((int32_t)insn << 21) >> 21;
  10667. val += (offset << 1) + 2;
  10668. gen_jmp(s, val);
  10669. break;
  10670. case 15:
  10671. if (disas_thumb2_insn(env, s, insn))
  10672. goto undef32;
  10673. break;
  10674. }
  10675. return;
  10676. undef32:
  10677. gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
  10678. default_exception_el(s));
  10679. return;
  10680. illegal_op:
  10681. undef:
  10682. gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
  10683. default_exception_el(s));
  10684. }
  10685. /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
  10686. basic block 'tb'. */
  10687. void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
  10688. {
  10689. ARMCPU *cpu = arm_env_get_cpu(env);
  10690. CPUState *cs = CPU(cpu);
  10691. DisasContext dc1, *dc = &dc1;
  10692. target_ulong pc_start;
  10693. target_ulong next_page_start;
  10694. int num_insns;
  10695. int max_insns;
  10696. /* generate intermediate code */
  10697. /* The A64 decoder has its own top level loop, because it doesn't need
  10698. * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
  10699. */
  10700. if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
  10701. gen_intermediate_code_a64(cpu, tb);
  10702. return;
  10703. }
  10704. pc_start = tb->pc;
  10705. dc->tb = tb;
  10706. dc->is_jmp = DISAS_NEXT;
  10707. dc->pc = pc_start;
  10708. dc->singlestep_enabled = cs->singlestep_enabled;
  10709. dc->condjmp = 0;
  10710. dc->aarch64 = 0;
  10711. /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
  10712. * there is no secure EL1, so we route exceptions to EL3.
  10713. */
  10714. dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
  10715. !arm_el_is_aa64(env, 3);
  10716. dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
  10717. dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
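/* Recover the IT-block state that was baked into the TB flags. */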
  10718. dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
  10719. dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
  10720. dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
  10721. dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
  10722. #if !defined(CONFIG_USER_ONLY)
  10723. dc->user = (dc->current_el == 0);
  10724. #endif
  10725. dc->ns = ARM_TBFLAG_NS(tb->flags);
  10726. dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
  10727. dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
  10728. dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
  10729. dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
  10730. dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
  10731. dc->cp_regs = cpu->cp_regs;
  10732. dc->features = env->features;
  10733. /* Single step state. The code-generation logic here is:
  10734. * SS_ACTIVE == 0:
  10735. * generate code with no special handling for single-stepping (except
  10736. * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
  10737. * this happens anyway because those changes are all system register or
  10738. * PSTATE writes).
  10739. * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
  10740. * emit code for one insn
  10741. * emit code to clear PSTATE.SS
  10742. * emit code to generate software step exception for completed step
  10743. * end TB (as usual for having generated an exception)
  10744. * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
  10745. * emit code to generate a software step exception
  10746. * end the TB
  10747. */
  10748. dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
  10749. dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
  10750. dc->is_ldex = false;
  10751. dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
  10752. cpu_F0s = tcg_temp_new_i32();
  10753. cpu_F1s = tcg_temp_new_i32();
  10754. cpu_F0d = tcg_temp_new_i64();
  10755. cpu_F1d = tcg_temp_new_i64();
  10756. cpu_V0 = cpu_F0d;
  10757. cpu_V1 = cpu_F1d;
  10758. /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
  10759. cpu_M0 = tcg_temp_new_i64();
  10760. next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
  10761. num_insns = 0;
  10762. max_insns = tb->cflags & CF_COUNT_MASK;
  10763. if (max_insns == 0) {
  10764. max_insns = CF_COUNT_MASK;
  10765. }
  10766. if (max_insns > TCG_MAX_INSNS) {
  10767. max_insns = TCG_MAX_INSNS;
  10768. }
  10769. gen_tb_start(tb);
  10770. tcg_clear_temp_count();
  10771. /* A note on handling of the condexec (IT) bits:
  10772. *
  10773. * We want to avoid the overhead of having to write the updated condexec
  10774. * bits back to the CPUARMState for every instruction in an IT block. So:
  10775. * (1) if the condexec bits are not already zero then we write
  10776. * zero back into the CPUARMState now. This avoids complications trying
  10777. * to do it at the end of the block. (For example if we don't do this
  10778. * it's hard to identify whether we can safely skip writing condexec
  10779. * at the end of the TB, which we definitely want to do for the case
  10780. * where a TB doesn't do anything with the IT state at all.)
  10781. * (2) if we are going to leave the TB then we call gen_set_condexec()
  10782. * which will write the correct value into CPUARMState if zero is wrong.
  10783. * This is done both for leaving the TB at the end, and for leaving
  10784. * it because of an exception we know will happen, which is done in
  10785. * gen_exception_insn(). The latter is necessary because we need to
  10786. * leave the TB with the PC/IT state just prior to execution of the
  10787. * instruction which caused the exception.
  10788. * (3) if we leave the TB unexpectedly (eg a data abort on a load)
  10789. * then the CPUARMState will be wrong and we need to reset it.
  10790. * This is handled in the same way as restoration of the
  10791. * PC in these situations; we save the value of the condexec bits
  10792. * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
  10793. * then uses this to restore them after an exception.
  10794. *
  10795. * Note that there are no instructions which can read the condexec
  10796. * bits, and none which can write non-static values to them, so
  10797. * we don't need to care about whether CPUARMState is correct in the
  10798. * middle of a TB.
  10799. */
  10800. /* Reset the conditional execution bits immediately. This avoids
  10801. complications trying to do it at the end of the block. */
  10802. if (dc->condexec_mask || dc->condexec_cond)
  10803. {
  10804. TCGv_i32 tmp = tcg_temp_new_i32();
  10805. tcg_gen_movi_i32(tmp, 0);
  10806. store_cpu_field(tmp, condexec_bits);
  10807. }
  10808. do {
  10809. tcg_gen_insn_start(dc->pc,
  10810. (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
  10811. num_insns++;
  10812. #ifdef CONFIG_USER_ONLY
  10813. /* Intercept jump to the magic kernel page. */
  10814. if (dc->pc >= 0xffff0000) {
10815. /* We always get here via a jump, so we know we are not in a
10816. conditional execution block. */
  10817. gen_exception_internal(EXCP_KERNEL_TRAP);
  10818. dc->is_jmp = DISAS_UPDATE;
  10819. break;
  10820. }
  10821. #else
  10822. if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
10823. /* We always get here via a jump, so we know we are not in a
10824. conditional execution block. */
  10825. gen_exception_internal(EXCP_EXCEPTION_EXIT);
  10826. dc->is_jmp = DISAS_UPDATE;
  10827. break;
  10828. }
  10829. #endif
  10830. if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
  10831. CPUBreakpoint *bp;
  10832. QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
  10833. if (bp->pc == dc->pc) {
  10834. gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
  10835. /* Advance PC so that clearing the breakpoint will
  10836. invalidate this TB. */
  10837. dc->pc += 2;
  10838. goto done_generating;
  10839. }
  10840. }
  10841. }
  10842. if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
  10843. gen_io_start();
  10844. }
  10845. if (dc->ss_active && !dc->pstate_ss) {
  10846. /* Singlestep state is Active-pending.
  10847. * If we're in this state at the start of a TB then either
  10848. * a) we just took an exception to an EL which is being debugged
  10849. * and this is the first insn in the exception handler
  10850. * b) debug exceptions were masked and we just unmasked them
  10851. * without changing EL (eg by clearing PSTATE.D)
  10852. * In either case we're going to take a swstep exception in the
  10853. * "did not step an insn" case, and so the syndrome ISV and EX
  10854. * bits should be zero.
  10855. */
  10856. assert(num_insns == 1);
  10857. gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
  10858. default_exception_el(dc));
  10859. goto done_generating;
  10860. }
  10861. if (dc->thumb) {
  10862. disas_thumb_insn(env, dc);
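/* Advance the IT state: shift the saved firstcond[0] bit from the mask into
   the condition and consume one mask bit per executed insn. */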
  10863. if (dc->condexec_mask) {
  10864. dc->condexec_cond = (dc->condexec_cond & 0xe)
  10865. | ((dc->condexec_mask >> 4) & 1);
  10866. dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
  10867. if (dc->condexec_mask == 0) {
  10868. dc->condexec_cond = 0;
  10869. }
  10870. }
  10871. } else {
  10872. unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
  10873. dc->pc += 4;
  10874. disas_arm_insn(dc, insn);
  10875. }
  10876. if (dc->condjmp && !dc->is_jmp) {
  10877. gen_set_label(dc->condlabel);
  10878. dc->condjmp = 0;
  10879. }
  10880. if (tcg_check_temp_count()) {
  10881. fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
  10882. dc->pc);
  10883. }
  10884. /* Translation stops when a conditional branch is encountered.
  10885. * Otherwise the subsequent code could get translated several times.
  10886. * Also stop translation when a page boundary is reached. This
  10887. * ensures prefetch aborts occur at the right place. */
  10888. } while (!dc->is_jmp && !tcg_op_buf_full() &&
  10889. !cs->singlestep_enabled &&
  10890. !singlestep &&
  10891. !dc->ss_active &&
  10892. dc->pc < next_page_start &&
  10893. num_insns < max_insns);
  10894. if (tb->cflags & CF_LAST_IO) {
  10895. if (dc->condjmp) {
  10896. /* FIXME: This can theoretically happen with self-modifying
  10897. code. */
  10898. cpu_abort(cs, "IO on conditional branch instruction");
  10899. }
  10900. gen_io_end();
  10901. }
  10902. /* At this stage dc->condjmp will only be set when the skipped
  10903. instruction was a conditional branch or trap, and the PC has
  10904. already been written. */
  10905. if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
  10906. /* Make sure the pc is updated, and raise a debug exception. */
  10907. if (dc->condjmp) {
  10908. gen_set_condexec(dc);
  10909. if (dc->is_jmp == DISAS_SWI) {
  10910. gen_ss_advance(dc);
  10911. gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
  10912. default_exception_el(dc));
  10913. } else if (dc->is_jmp == DISAS_HVC) {
  10914. gen_ss_advance(dc);
  10915. gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
  10916. } else if (dc->is_jmp == DISAS_SMC) {
  10917. gen_ss_advance(dc);
  10918. gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
  10919. } else if (dc->ss_active) {
  10920. gen_step_complete_exception(dc);
  10921. } else {
  10922. gen_exception_internal(EXCP_DEBUG);
  10923. }
  10924. gen_set_label(dc->condlabel);
  10925. }
  10926. if (dc->condjmp || !dc->is_jmp) {
  10927. gen_set_pc_im(dc, dc->pc);
  10928. dc->condjmp = 0;
  10929. }
  10930. gen_set_condexec(dc);
  10931. if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
  10932. gen_ss_advance(dc);
  10933. gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
  10934. default_exception_el(dc));
  10935. } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
  10936. gen_ss_advance(dc);
  10937. gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
  10938. } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
  10939. gen_ss_advance(dc);
  10940. gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
  10941. } else if (dc->ss_active) {
  10942. gen_step_complete_exception(dc);
  10943. } else {
  10944. /* FIXME: Single stepping a WFI insn will not halt
  10945. the CPU. */
  10946. gen_exception_internal(EXCP_DEBUG);
  10947. }
  10948. } else {
  10949. /* While branches must always occur at the end of an IT block,
  10950. there are a few other things that can cause us to terminate
  10951. the TB in the middle of an IT block:
  10952. - Exception generating instructions (bkpt, swi, undefined).
  10953. - Page boundaries.
  10954. - Hardware watchpoints.
  10955. Hardware breakpoints have already been handled and skip this code.
  10956. */
  10957. gen_set_condexec(dc);
  10958. switch(dc->is_jmp) {
  10959. case DISAS_NEXT:
  10960. gen_goto_tb(dc, 1, dc->pc);
  10961. break;
  10962. default:
  10963. case DISAS_JUMP:
  10964. case DISAS_UPDATE:
  10965. /* indicate that the hash table must be used to find the next TB */
  10966. tcg_gen_exit_tb(0);
  10967. break;
  10968. case DISAS_TB_JUMP:
  10969. /* nothing more to generate */
  10970. break;
  10971. case DISAS_WFI:
  10972. gen_helper_wfi(cpu_env);
  10973. /* The helper doesn't necessarily throw an exception, but we
  10974. * must go back to the main loop to check for interrupts anyway.
  10975. */
  10976. tcg_gen_exit_tb(0);
  10977. break;
  10978. case DISAS_WFE:
  10979. gen_helper_wfe(cpu_env);
  10980. break;
  10981. case DISAS_YIELD:
  10982. gen_helper_yield(cpu_env);
  10983. break;
  10984. case DISAS_SWI:
  10985. gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
  10986. default_exception_el(dc));
  10987. break;
  10988. case DISAS_HVC:
  10989. gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
  10990. break;
  10991. case DISAS_SMC:
  10992. gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
  10993. break;
  10994. }
  10995. if (dc->condjmp) {
  10996. gen_set_label(dc->condlabel);
  10997. gen_set_condexec(dc);
  10998. gen_goto_tb(dc, 1, dc->pc);
  10999. dc->condjmp = 0;
  11000. }
  11001. }
  11002. done_generating:
  11003. gen_tb_end(tb, num_insns);
  11004. #ifdef DEBUG_DISAS
  11005. if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
  11006. qemu_log("----------------\n");
  11007. qemu_log("IN: %s\n", lookup_symbol(pc_start));
  11008. log_target_disas(cs, pc_start, dc->pc - pc_start,
  11009. dc->thumb | (dc->bswap_code << 1));
  11010. qemu_log("\n");
  11011. }
  11012. #endif
  11013. tb->size = dc->pc - pc_start;
  11014. tb->icount = num_insns;
  11015. }
  11016. static const char *cpu_mode_names[16] = {
  11017. "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  11018. "???", "???", "hyp", "und", "???", "???", "???", "sys"
  11019. };
  11020. void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
  11021. int flags)
  11022. {
  11023. ARMCPU *cpu = ARM_CPU(cs);
  11024. CPUARMState *env = &cpu->env;
  11025. int i;
  11026. uint32_t psr;
  11027. if (is_a64(env)) {
  11028. aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
  11029. return;
  11030. }
  11031. for(i=0;i<16;i++) {
  11032. cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
  11033. if ((i % 4) == 3)
  11034. cpu_fprintf(f, "\n");
  11035. else
  11036. cpu_fprintf(f, " ");
  11037. }
  11038. psr = cpsr_read(env);
  11039. cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
  11040. psr,
  11041. psr & (1 << 31) ? 'N' : '-',
  11042. psr & (1 << 30) ? 'Z' : '-',
  11043. psr & (1 << 29) ? 'C' : '-',
  11044. psr & (1 << 28) ? 'V' : '-',
  11045. psr & CPSR_T ? 'T' : 'A',
  11046. cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
  11047. if (flags & CPU_DUMP_FPU) {
  11048. int numvfpregs = 0;
  11049. if (arm_feature(env, ARM_FEATURE_VFP)) {
  11050. numvfpregs += 16;
  11051. }
  11052. if (arm_feature(env, ARM_FEATURE_VFP3)) {
  11053. numvfpregs += 16;
  11054. }
  11055. for (i = 0; i < numvfpregs; i++) {
  11056. uint64_t v = float64_val(env->vfp.regs[i]);
  11057. cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
  11058. i * 2, (uint32_t)v,
  11059. i * 2 + 1, (uint32_t)(v >> 32),
  11060. i, v);
  11061. }
  11062. cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
  11063. }
  11064. }
  11065. void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
  11066. target_ulong *data)
  11067. {
  11068. if (is_a64(env)) {
  11069. env->pc = data[0];
  11070. env->condexec_bits = 0;
  11071. } else {
  11072. env->regs[15] = data[0];
  11073. env->condexec_bits = data[1];
  11074. }
  11075. }