translate.c 263 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
7877887798780878187828783878487858786878787888789879087918792879387948795879687978798879988008801880288038804880588068807880888098810881188128813881488158816881788188819882088218822882388248825882688278828882988308831883288338834883588368837883888398840884188428843884488458846884788488849885088518852885388548855885688578858885988608861886288638864886588668867886888698870887188728873887488758876887788788879888088818882888388848885888688878888888988908891889288938894889588968897889888998900890189028903890489058906890789088909891089118912891389148915891689178918891989208921892289238924892589268927892889298930893189328933893489358936893789388939894089418942894389448945894689478948894989508951895289538954895589568957895889598960896189628963896489658966896789688969897089718972897389748975897689778978897989808981898289838984898589868987898889898990899189928993899489958996899789988999900090019002900390049005900690079008900990109011901290139014901590169017901890199020
  1. /*
  2. * ARM translation
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. * Copyright (c) 2005-2007 CodeSourcery
  6. * Copyright (c) 2007 OpenedHand, Ltd.
  7. *
  8. * This library is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2 of the License, or (at your option) any later version.
  12. *
  13. * This library is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  20. */
  21. #include "qemu/osdep.h"
  22. #include "cpu.h"
  23. #include "internals.h"
  24. #include "disas/disas.h"
  25. #include "exec/exec-all.h"
  26. #include "tcg/tcg-op.h"
  27. #include "tcg/tcg-op-gvec.h"
  28. #include "qemu/log.h"
  29. #include "qemu/bitops.h"
  30. #include "arm_ldst.h"
  31. #include "hw/semihosting/semihost.h"
  32. #include "exec/helper-proto.h"
  33. #include "exec/helper-gen.h"
  34. #include "trace-tcg.h"
  35. #include "exec/log.h"
  36. #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
  37. #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
  38. /* currently all emulated v5 cores are also v5TE, so don't bother */
  39. #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
  40. #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
  41. #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
  42. #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
  43. #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
  44. #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
  45. #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
  46. #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
  47. #include "translate.h"
  48. #if defined(CONFIG_USER_ONLY)
  49. #define IS_USER(s) 1
  50. #else
  51. #define IS_USER(s) (s->user)
  52. #endif
  53. /* We reuse the same 64-bit temporaries for efficiency. */
  54. static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
  55. static TCGv_i32 cpu_R[16];
  56. TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
  57. TCGv_i64 cpu_exclusive_addr;
  58. TCGv_i64 cpu_exclusive_val;
  59. #include "exec/gen-icount.h"
  60. static const char * const regnames[] =
  61. { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
  62. "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
  63. /* Function prototypes for gen_ functions calling Neon helpers. */
  64. typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
  65. TCGv_i32, TCGv_i32);
  66. /* Function prototypes for gen_ functions for fix point conversions */
  67. typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
  68. /* initialize TCG globals. */
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* Expose the 16 AArch32 core registers as named TCG globals. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* The four condition flags live in separate CPUARMState words. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
    /* Exclusive-monitor state used by load/store-exclusive handling. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    /* Let the AArch64 translator register its own globals as well. */
    a64_translate_init();
}
  87. /* Flags for the disas_set_da_iss info argument:
  88. * lower bits hold the Rt register number, higher bits are flags.
  89. */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,      /* low 5 bits hold the Rt register number */
    ISSInvalid = (1 << 5),  /* suppress ISS generation for this access */
    ISSIsAcqRel = (1 << 6), /* access has acquire/release semantics */
    ISSIsWrite = (1 << 7),  /* access is a store */
    ISSIs16Bit = (1 << 8),  /* insn is a 16-bit (Thumb) encoding */
} ISSInfo;
  98. /* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;   /* access size field of the syndrome */
    bool sse = memop & MO_SIGN;  /* sign-extended load */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask; /* Rt: the register being transferred */

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        /* No other translation regime should reach the decoder here. */
        g_assert_not_reached();
    }
}
  159. static inline TCGv_i32 load_cpu_offset(int offset)
  160. {
  161. TCGv_i32 tmp = tcg_temp_new_i32();
  162. tcg_gen_ld_i32(tmp, cpu_env, offset);
  163. return tmp;
  164. }
  165. #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    /* Store @var to CPU state at @offset; @var is freed (marked dead). */
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}
  171. #define store_cpu_field(var, name) \
  172. store_cpu_offset(var, offsetof(CPUARMState, name))
  173. /* The architectural value of PC. */
  174. static uint32_t read_pc(DisasContext *s)
  175. {
  176. return s->pc_curr + (s->thumb ? 4 : 8);
  177. }
  178. /* Set a variable to the value of a CPU register. */
  179. static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
  180. {
  181. if (reg == 15) {
  182. tcg_gen_movi_i32(var, read_pc(s));
  183. } else {
  184. tcg_gen_mov_i32(var, cpu_R[reg]);
  185. }
  186. }
  187. /* Create a new temporary and set it to the value of a CPU register. */
  188. static inline TCGv_i32 load_reg(DisasContext *s, int reg)
  189. {
  190. TCGv_i32 tmp = tcg_temp_new_i32();
  191. load_reg_var(s, tmp, reg);
  192. return tmp;
  193. }
  194. /*
  195. * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
  196. * This is used for load/store for which use of PC implies (literal),
  197. * or ADD that implies ADR.
  198. */
  199. static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
  200. {
  201. TCGv_i32 tmp = tcg_temp_new_i32();
  202. if (reg == 15) {
  203. tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
  204. } else {
  205. tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
  206. }
  207. return tmp;
  208. }
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* A PC write ends the TB: flag it as an indirect jump. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
  225. /*
  226. * Variant of store_reg which applies v8M stack-limit checks before updating
  227. * SP. If the check fails this will result in an exception being taken.
  228. * We disable the stack checks for CONFIG_USER_ONLY because we have
  229. * no idea what the stack limits should be in that case.
  230. * If stack checking is not being done this just acts like store_reg().
  231. */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        /* May raise an exception if the new SP is below the stack limit. */
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    /* r13 is SP; store_reg() consumes @var. */
    store_reg(s, 13, var);
}
  241. /* Value extensions. */
  242. #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
  243. #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
  244. #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
  245. #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
  246. #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
  247. #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
  248. static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
  249. {
  250. TCGv_i32 tmp_mask = tcg_const_i32(mask);
  251. gen_helper_cpsr_write(cpu_env, var, tmp_mask);
  252. tcg_temp_free_i32(tmp_mask);
  253. }
  254. /* Set NZCV flags from the high 4 bits of var. */
  255. #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
  256. static void gen_exception_internal(int excp)
  257. {
  258. TCGv_i32 tcg_excp = tcg_const_i32(excp);
  259. assert(excp_is_internal(excp));
  260. gen_helper_exception_internal(cpu_env, tcg_excp);
  261. tcg_temp_free_i32(tcg_excp);
  262. }
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    /* The exception ends the TB unconditionally. */
    s->base.is_jmp = DISAS_NORETURN;
}
  278. static void gen_singlestep_exception(DisasContext *s)
  279. {
  280. /* Generate the right kind of exception for singlestep, which is
  281. * either the architectural singlestep or EXCP_DEBUG for QEMU's
  282. * gdb singlestepping.
  283. */
  284. if (s->ss_active) {
  285. gen_step_complete_exception(s);
  286. } else {
  287. gen_exception_internal(EXCP_DEBUG);
  288. }
  289. }
  290. static inline bool is_singlestepping(DisasContext *s)
  291. {
  292. /* Return true if we are singlestepping either because of
  293. * architectural singlestep or QEMU gdbstub singlestep. This does
  294. * not include the command line '-singlestep' mode which is rather
  295. * misnamed as it only means "one instruction per TB" and doesn't
  296. * affect the code we generate.
  297. */
  298. return s->base.singlestep_enabled || s->ss_active;
  299. }
/* Dual signed 16x16->32 multiply, in place:
 * a = sext16(a) * sext16(b); b = (a >> 16) * (b >> 16),
 * each using the original values of a and b.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Product of the (sign-extended) low halves goes into tmp1. */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* Arithmetic shifts keep the sign of the high halves. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    /* Only now overwrite a: it was still needed for the high product. */
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    /* tmp holds the odd bytes moved down, var the even bytes moved up;
     * OR-ing them swaps the bytes within each 16-bit lane.
     */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(dest, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
    /* Zero-extend first so the 16-bit bswap sees only the low half,
     * then sign-extend the swapped halfword into dest.
     */
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(dest, var);
}
  334. /* Swap low and high halfwords. */
  335. static void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
  336. {
  337. tcg_gen_rotri_i32(dest, var, 16);
  338. }
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Clear bit 15 of both operands so the low-half add cannot carry
     * into the high half, then restore the correct bit 15 via the
     * saved XOR (see pseudo-code above).
     */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
  356. /* Set N and Z flags from var. */
  357. static inline void gen_logic_CC(TCGv_i32 var)
  358. {
  359. tcg_gen_mov_i32(cpu_NF, var);
  360. tcg_gen_mov_i32(cpu_ZF, var);
  361. }
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Sum the operands first: dest may alias t0 or t1, so cpu_CF is
     * only added once both inputs have been consumed.
     */
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Subtract-with-borrow: CF == 1 means "no borrow", hence the -1. */
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Widening add: low word goes to NF, the carry-out to CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Overflow: result differs in sign from t0 while t0 and t1 agree. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    /* Write dest last: it may alias t0 or t1. */
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained widening adds: t0 + CF, then + t1, accumulating
         * the carry-out in CF.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the whole sum at 64 bits and split the result
         * into NF (low word) and CF (carry-out).
         */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Overflow: result sign differs from t0 while t0 and t1 agree. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    /* Write dest last: it may alias t0 or t1. */
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction sets C when there is NO borrow, i.e. t0 >= t1. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* Overflow: result sign differs from t0 while t0 and t1 differ. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    /* Write dest last: it may alias t0 or t1. */
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Subtract-with-carry is add-with-carry of the complement. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/*
 * Variable-count shifts: dest = t0 SHIFT (t1 & 0xff).  A count of 32 or
 * more yields 0: the movcond substitutes a zero source operand when the
 * masked count exceeds 31, and the final TCG shift uses count & 0x1f.
 */
#define GEN_SHIFT(name)                                       \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{                                                             \
    TCGv_i32 tmp1, tmp2, tmp3;                                \
    tmp1 = tcg_temp_new_i32();                                \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                         \
    tmp2 = tcg_const_i32(0);                                  \
    tmp3 = tcg_const_i32(0x1f);                               \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                  \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                       \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                   \
    tcg_temp_free_i32(tmp2);                                  \
    tcg_temp_free_i32(tmp1);                                  \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
/* Variable-count arithmetic right shift: dest = t0 >> (t1 & 0xff). */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    /* Clamp counts above 31 to 31: ASR by >= 32 fills with the sign bit,
     * which is the same result as ASR #31.
     */
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
  466. static void shifter_out_im(TCGv_i32 var, int shift)
  467. {
  468. tcg_gen_extract_i32(cpu_CF, var, shift, 1);
  469. }
  470. /* Shift by immediate. Includes special handling for shift == 0. */
  471. static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
  472. int shift, int flags)
  473. {
  474. switch (shiftop) {
  475. case 0: /* LSL */
  476. if (shift != 0) {
  477. if (flags)
  478. shifter_out_im(var, 32 - shift);
  479. tcg_gen_shli_i32(var, var, shift);
  480. }
  481. break;
  482. case 1: /* LSR */
  483. if (shift == 0) {
  484. if (flags) {
  485. tcg_gen_shri_i32(cpu_CF, var, 31);
  486. }
  487. tcg_gen_movi_i32(var, 0);
  488. } else {
  489. if (flags)
  490. shifter_out_im(var, shift - 1);
  491. tcg_gen_shri_i32(var, var, shift);
  492. }
  493. break;
  494. case 2: /* ASR */
  495. if (shift == 0)
  496. shift = 32;
  497. if (flags)
  498. shifter_out_im(var, shift - 1);
  499. if (shift == 32)
  500. shift = 31;
  501. tcg_gen_sari_i32(var, var, shift);
  502. break;
  503. case 3: /* ROR/RRX */
  504. if (shift != 0) {
  505. if (flags)
  506. shifter_out_im(var, shift - 1);
  507. tcg_gen_rotri_i32(var, var, shift); break;
  508. } else {
  509. TCGv_i32 tmp = tcg_temp_new_i32();
  510. tcg_gen_shli_i32(tmp, cpu_CF, 31);
  511. if (flags)
  512. shifter_out_im(var, 0);
  513. tcg_gen_shri_i32(var, var, 1);
  514. tcg_gen_or_i32(var, var, tmp);
  515. tcg_temp_free_i32(tmp);
  516. }
  517. }
  518. };
/* Shift @var by the register count @shift; @shift is consumed.
 * With @flags set, helpers are used so CF is also updated.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            /* Rotate uses only the low 5 bits of the count. */
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}
  546. /*
  547. * Generate a conditional based on ARM condition code cc.
  548. * This is common between ARM and Aarch64 targets.
  549. */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    /* Whether @value is a TCG global (flag register) rather than a temp
     * we allocate here; arm_free_cc() frees only non-global values.
     */
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
  624. void arm_free_cc(DisasCompare *cmp)
  625. {
  626. if (!cmp->value_global) {
  627. tcg_temp_free_i32(cmp->value);
  628. }
  629. }
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    /* Branch to @label if the prepared comparison against zero holds. */
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
  634. void arm_gen_test_cc(int cc, TCGLabel *label)
  635. {
  636. DisasCompare cmp;
  637. arm_test_cc(&cmp, cc);
  638. arm_jump_cc(&cmp, label);
  639. arm_free_cc(&cmp);
  640. }
static inline void gen_set_condexec(DisasContext *s)
{
    /* Sync the translator's conditional-execution state into the
     * CPU's condexec_bits field; a no-op when not inside such a block.
     */
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    /* Set the PC to an immediate; does not itself end the TB. */
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Set PC and Thumb state from var.  var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    /* Bit 0 of the branch target selects the Thumb state and is
     * cleared from the PC itself.
     */
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
  662. /*
  663. * Set PC and Thumb state from var. var is marked as dead.
  664. * For M-profile CPUs, include logic to detect exception-return
  665. * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
  666. * and BX reg, and no others, and happens only for code in Handler mode.
  667. * The Security Extension also requires us to check for the FNC_RETURN
  668. * which signals a function return from non-secure state; this can happen
  669. * in both Handler and Thread mode.
  670. * To avoid having to do multiple comparisons in inline generated code,
  671. * we make the check we do here loose, so it will match for EXC_RETURN
  672. * in Thread mode. For system emulation do_v7m_exception_exit() checks
  673. * for these spurious cases and returns without doing anything (giving
  674. * the same behaviour as for a branch to a non-magic address).
  675. *
  676. * In linux-user mode it is unclear what the right behaviour for an
  677. * attempted FNC_RETURN should be, because in real hardware this will go
  678. * directly to Secure code (ie not the Linux kernel) which will then treat
  679. * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
  680. * attempt behave the way it would on a CPU without the security extension,
  681. * which is to say "like a normal branch". That means we can simply treat
  682. * all branches as normal with no magic address behaviour.
  683. */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
#ifndef CONFIG_USER_ONLY
    /* Only M-profile with the Security Extension, or code in Handler
     * mode, can hit the magic exception-return addresses; see the
     * comment block above for the full rationale.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
#endif
}
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
/* Generate a v8M BXNS (branch to non-secure) for register @rm. */
static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
/* Generate a v8M BLXNS (branch-with-link to non-secure) for @rm. */
static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->base.pc_next);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
  761. /* Variant of store_reg which uses branch&exchange logic when storing
  762. to r15 in ARM architecture v7 and above. The source must be a temporary
  763. and will be marked as dead. */
  764. static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
  765. {
  766. if (reg == 15 && ENABLE_ARCH_7) {
  767. gen_bx(s, var);
  768. } else {
  769. store_reg(s, reg, var);
  770. }
  771. }
  772. /* Variant of store_reg which uses branch&exchange logic when storing
  773. * to r15 in ARM architecture v5T and above. This is used for storing
  774. * the results of a LDR/LDM/POP into r15, and corresponds to the cases
  775. * in the ARM ARM which use the LoadWritePC() pseudocode function. */
  776. static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
  777. {
  778. if (reg == 15 && ENABLE_ARCH_5) {
  779. gen_bx_excret(s, var);
  780. } else {
  781. store_reg(s, reg, var);
  782. }
  783. }
  784. #ifdef CONFIG_USER_ONLY
  785. #define IS_USER_ONLY 1
  786. #else
  787. #define IS_USER_ONLY 0
  788. #endif
  789. /* Abstractions of "generate code to do a guest load/store for
  790. * AArch32", where a vaddr is always 32 bits (and is zero
  791. * extended if we're a 64 bit core) and data is also
  792. * 32 bits unless specifically doing a 64 bit access.
  793. * These functions work like tcg_gen_qemu_{ld,st}* except
  794. * that the address argument is TCGv_i32 rather than TCGv.
  795. */
  796. static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
  797. {
  798. TCGv addr = tcg_temp_new();
  799. tcg_gen_extu_i32_tl(addr, a32);
  800. /* Not needed for user-mode BE32, where we use MO_BE instead. */
  801. if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
  802. tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
  803. }
  804. return addr;
  805. }
/* Generate a 32-bit (or narrower, per opc) guest load. */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    /* Force alignment checking for M-profile cores without the
     * Main Extension.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

/* Generate a 32-bit (or narrower, per opc) guest store. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    /* Force alignment checking for M-profile cores without the
     * Main Extension.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
/*
 * DO_GEN_LD / DO_GEN_ST: define gen_aa32_ld<SUFF> / gen_aa32_st<SUFF>
 * wrappers which perform an access of the given size/extension (OPC)
 * using the TB's endianness setting (s->be_data).
 */
#define DO_GEN_LD(SUFF, OPC)                                            \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                       \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);              \
}

#define DO_GEN_ST(SUFF, OPC)                                            \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                       \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);              \
}
/* Swap the two 32-bit halves of a just-loaded 64-bit value when running
 * in system-mode BE32.
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}
/* Generate a 64-bit guest load with the given memop. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    /* Fix up the word order for system-mode BE32. */
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

/* 64-bit guest load using the TB's endianness setting. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
/* Generate a 64-bit guest store with the given memop. */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        /* System-mode BE32: store with the two words swapped. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* 64-bit guest store using the TB's endianness setting. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
/* Instantiate the AArch32 load/store wrappers for each access size. */
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
/* Generate code for HVC (hypervisor call) with a 16-bit immediate. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_HVC;
}

/* Generate code for SMC (secure monitor call). */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc_curr);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_SMC;
}
/* Generate a QEMU-internal exception for the instruction at pc. */
static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate an architectural exception with the given syndrome value,
 * targeting exception level target_el.
 */
static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate a breakpoint exception for the current instruction. */
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
/* Generate an UNDEF exception for an unallocated instruction encoding. */
static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
    s->base.is_jmp = DISAS_EXIT;
}
/* Generate code for HLT: either a semihosting trap (for the magic
 * immediates) or UNDEF, since external debug is not implemented.
 */
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        return;
    }

    unallocated_encoding(s);
}
  979. static TCGv_ptr get_fpstatus_ptr(int neon)
  980. {
  981. TCGv_ptr statusptr = tcg_temp_new_ptr();
  982. int offset;
  983. if (neon) {
  984. offset = offsetof(CPUARMState, vfp.standard_fp_status);
  985. } else {
  986. offset = offsetof(CPUARMState, vfp.fp_status);
  987. }
  988. tcg_gen_addi_ptr(statusptr, cpu_env, offset);
  989. return statusptr;
  990. }
  991. static inline long vfp_reg_offset(bool dp, unsigned reg)
  992. {
  993. if (dp) {
  994. return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
  995. } else {
  996. long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
  997. if (reg & 1) {
  998. ofs += offsetof(CPU_DoubleU, l.upper);
  999. } else {
  1000. ofs += offsetof(CPU_DoubleU, l.lower);
  1001. }
  1002. return ofs;
  1003. }
  1004. }
  1005. /* Return the offset of a 32-bit piece of a NEON register.
  1006. zero is the least significant end of the register. */
  1007. static inline long
  1008. neon_reg_offset (int reg, int n)
  1009. {
  1010. int sreg;
  1011. sreg = reg * 2 + n;
  1012. return vfp_reg_offset(0, sreg);
  1013. }
/* Load one 32-bit pass of a NEON register into a fresh temporary. */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store var into one 32-bit pass of a NEON register; var is freed. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Load/store a whole 64-bit D register. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Load/store a 32-bit S register. */
static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

/* Return a host pointer (TCGv_ptr) to the given VFP/NEON register. */
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}
  1047. #define ARM_CP_RW_BIT (1 << 20)
  1048. /* Include the VFP and Neon decoders */
  1049. #include "translate-vfp.c.inc"
  1050. #include "translate-neon.c.inc"
/* Load/store the 64-bit iwMMXt data register wRn. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load the 32-bit iwMMXt control register wCx into a new temporary. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store var into the iwMMXt control register wCx; var is freed. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Move between wRn and the cpu_M0 working register. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit logical ops: M0 = M0 <op> wRn (cpu_V1 used as scratch). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
/*
 * IWMMXT_OP: define gen_op_iwmmxt_<name>_M0_wRn(), which computes
 * M0 = iwmmxt_<name>(M0, wRn) via the corresponding helper.
 */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env so it can access
 * CPU state.
 */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Define the byte/word/long variants of an env helper in one go. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env helper: M0 = iwmmxt_<name>(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
/* Two-operand ops without env: M0 = helper(M0, wRn). */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Unpack ops, in byte/word/long and one-operand extend variants. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Compare/min/max/add/sub families, in byte/word/long variants. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Average and pack ops. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
/* Set bit 1 (MUp) of the wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;

    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 (CUp) of the wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;

    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Compute the N/Z SIMD flags from cpu_M0 and store them into wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
  1187. static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
  1188. TCGv_i32 dest)
  1189. {
  1190. int rd;
  1191. uint32_t offset;
  1192. TCGv_i32 tmp;
  1193. rd = (insn >> 16) & 0xf;
  1194. tmp = load_reg(s, rd);
  1195. offset = (insn & 0xff) << ((insn >> 7) & 2);
  1196. if (insn & (1 << 24)) {
  1197. /* Pre indexed */
  1198. if (insn & (1 << 23))
  1199. tcg_gen_addi_i32(tmp, tmp, offset);
  1200. else
  1201. tcg_gen_addi_i32(tmp, tmp, -offset);
  1202. tcg_gen_mov_i32(dest, tmp);
  1203. if (insn & (1 << 21))
  1204. store_reg(s, rd, tmp);
  1205. else
  1206. tcg_temp_free_i32(tmp);
  1207. } else if (insn & (1 << 21)) {
  1208. /* Post indexed */
  1209. tcg_gen_mov_i32(dest, tmp);
  1210. if (insn & (1 << 23))
  1211. tcg_gen_addi_i32(tmp, tmp, offset);
  1212. else
  1213. tcg_gen_addi_i32(tmp, tmp, -offset);
  1214. store_reg(s, rd, tmp);
  1215. } else if (!(insn & (1 << 23)))
  1216. return 1;
  1217. return 0;
  1218. }
  1219. static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
  1220. {
  1221. int rd = (insn >> 0) & 0xf;
  1222. TCGv_i32 tmp;
  1223. if (insn & (1 << 8)) {
  1224. if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
  1225. return 1;
  1226. } else {
  1227. tmp = iwmmxt_load_creg(rd);
  1228. }
  1229. } else {
  1230. tmp = tcg_temp_new_i32();
  1231. iwmmxt_load_reg(cpu_V0, rd);
  1232. tcg_gen_extrl_i64_i32(tmp, cpu_V0);
  1233. }
  1234. tcg_gen_andi_i32(tmp, tmp, mask);
  1235. tcg_gen_mov_i32(dest, tmp);
  1236. tcg_temp_free_i32(tmp);
  1237. return 0;
  1238. }
  1239. /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
  1240. (ie. an undefined instruction). */
  1241. static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
  1242. {
  1243. int rd, wrd;
  1244. int rdhi, rdlo, rd0, rd1, i;
  1245. TCGv_i32 addr;
  1246. TCGv_i32 tmp, tmp2, tmp3;
  1247. if ((insn & 0x0e000e00) == 0x0c000000) {
  1248. if ((insn & 0x0fe00ff0) == 0x0c400000) {
  1249. wrd = insn & 0xf;
  1250. rdlo = (insn >> 12) & 0xf;
  1251. rdhi = (insn >> 16) & 0xf;
  1252. if (insn & ARM_CP_RW_BIT) { /* TMRRC */
  1253. iwmmxt_load_reg(cpu_V0, wrd);
  1254. tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
  1255. tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
  1256. } else { /* TMCRR */
  1257. tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
  1258. iwmmxt_store_reg(cpu_V0, wrd);
  1259. gen_op_iwmmxt_set_mup();
  1260. }
  1261. return 0;
  1262. }
  1263. wrd = (insn >> 12) & 0xf;
  1264. addr = tcg_temp_new_i32();
  1265. if (gen_iwmmxt_address(s, insn, addr)) {
  1266. tcg_temp_free_i32(addr);
  1267. return 1;
  1268. }
  1269. if (insn & ARM_CP_RW_BIT) {
  1270. if ((insn >> 28) == 0xf) { /* WLDRW wCx */
  1271. tmp = tcg_temp_new_i32();
  1272. gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
  1273. iwmmxt_store_creg(wrd, tmp);
  1274. } else {
  1275. i = 1;
  1276. if (insn & (1 << 8)) {
  1277. if (insn & (1 << 22)) { /* WLDRD */
  1278. gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
  1279. i = 0;
  1280. } else { /* WLDRW wRd */
  1281. tmp = tcg_temp_new_i32();
  1282. gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
  1283. }
  1284. } else {
  1285. tmp = tcg_temp_new_i32();
  1286. if (insn & (1 << 22)) { /* WLDRH */
  1287. gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
  1288. } else { /* WLDRB */
  1289. gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
  1290. }
  1291. }
  1292. if (i) {
  1293. tcg_gen_extu_i32_i64(cpu_M0, tmp);
  1294. tcg_temp_free_i32(tmp);
  1295. }
  1296. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1297. }
  1298. } else {
  1299. if ((insn >> 28) == 0xf) { /* WSTRW wCx */
  1300. tmp = iwmmxt_load_creg(wrd);
  1301. gen_aa32_st32(s, tmp, addr, get_mem_index(s));
  1302. } else {
  1303. gen_op_iwmmxt_movq_M0_wRn(wrd);
  1304. tmp = tcg_temp_new_i32();
  1305. if (insn & (1 << 8)) {
  1306. if (insn & (1 << 22)) { /* WSTRD */
  1307. gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
  1308. } else { /* WSTRW wRd */
  1309. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1310. gen_aa32_st32(s, tmp, addr, get_mem_index(s));
  1311. }
  1312. } else {
  1313. if (insn & (1 << 22)) { /* WSTRH */
  1314. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1315. gen_aa32_st16(s, tmp, addr, get_mem_index(s));
  1316. } else { /* WSTRB */
  1317. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1318. gen_aa32_st8(s, tmp, addr, get_mem_index(s));
  1319. }
  1320. }
  1321. }
  1322. tcg_temp_free_i32(tmp);
  1323. }
  1324. tcg_temp_free_i32(addr);
  1325. return 0;
  1326. }
  1327. if ((insn & 0x0f000000) != 0x0e000000)
  1328. return 1;
  1329. switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
  1330. case 0x000: /* WOR */
  1331. wrd = (insn >> 12) & 0xf;
  1332. rd0 = (insn >> 0) & 0xf;
  1333. rd1 = (insn >> 16) & 0xf;
  1334. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1335. gen_op_iwmmxt_orq_M0_wRn(rd1);
  1336. gen_op_iwmmxt_setpsr_nz();
  1337. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1338. gen_op_iwmmxt_set_mup();
  1339. gen_op_iwmmxt_set_cup();
  1340. break;
  1341. case 0x011: /* TMCR */
  1342. if (insn & 0xf)
  1343. return 1;
  1344. rd = (insn >> 12) & 0xf;
  1345. wrd = (insn >> 16) & 0xf;
  1346. switch (wrd) {
  1347. case ARM_IWMMXT_wCID:
  1348. case ARM_IWMMXT_wCASF:
  1349. break;
  1350. case ARM_IWMMXT_wCon:
  1351. gen_op_iwmmxt_set_cup();
  1352. /* Fall through. */
  1353. case ARM_IWMMXT_wCSSF:
  1354. tmp = iwmmxt_load_creg(wrd);
  1355. tmp2 = load_reg(s, rd);
  1356. tcg_gen_andc_i32(tmp, tmp, tmp2);
  1357. tcg_temp_free_i32(tmp2);
  1358. iwmmxt_store_creg(wrd, tmp);
  1359. break;
  1360. case ARM_IWMMXT_wCGR0:
  1361. case ARM_IWMMXT_wCGR1:
  1362. case ARM_IWMMXT_wCGR2:
  1363. case ARM_IWMMXT_wCGR3:
  1364. gen_op_iwmmxt_set_cup();
  1365. tmp = load_reg(s, rd);
  1366. iwmmxt_store_creg(wrd, tmp);
  1367. break;
  1368. default:
  1369. return 1;
  1370. }
  1371. break;
  1372. case 0x100: /* WXOR */
  1373. wrd = (insn >> 12) & 0xf;
  1374. rd0 = (insn >> 0) & 0xf;
  1375. rd1 = (insn >> 16) & 0xf;
  1376. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1377. gen_op_iwmmxt_xorq_M0_wRn(rd1);
  1378. gen_op_iwmmxt_setpsr_nz();
  1379. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1380. gen_op_iwmmxt_set_mup();
  1381. gen_op_iwmmxt_set_cup();
  1382. break;
  1383. case 0x111: /* TMRC */
  1384. if (insn & 0xf)
  1385. return 1;
  1386. rd = (insn >> 12) & 0xf;
  1387. wrd = (insn >> 16) & 0xf;
  1388. tmp = iwmmxt_load_creg(wrd);
  1389. store_reg(s, rd, tmp);
  1390. break;
  1391. case 0x300: /* WANDN */
  1392. wrd = (insn >> 12) & 0xf;
  1393. rd0 = (insn >> 0) & 0xf;
  1394. rd1 = (insn >> 16) & 0xf;
  1395. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1396. tcg_gen_neg_i64(cpu_M0, cpu_M0);
  1397. gen_op_iwmmxt_andq_M0_wRn(rd1);
  1398. gen_op_iwmmxt_setpsr_nz();
  1399. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1400. gen_op_iwmmxt_set_mup();
  1401. gen_op_iwmmxt_set_cup();
  1402. break;
  1403. case 0x200: /* WAND */
  1404. wrd = (insn >> 12) & 0xf;
  1405. rd0 = (insn >> 0) & 0xf;
  1406. rd1 = (insn >> 16) & 0xf;
  1407. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1408. gen_op_iwmmxt_andq_M0_wRn(rd1);
  1409. gen_op_iwmmxt_setpsr_nz();
  1410. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1411. gen_op_iwmmxt_set_mup();
  1412. gen_op_iwmmxt_set_cup();
  1413. break;
  1414. case 0x810: case 0xa10: /* WMADD */
  1415. wrd = (insn >> 12) & 0xf;
  1416. rd0 = (insn >> 0) & 0xf;
  1417. rd1 = (insn >> 16) & 0xf;
  1418. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1419. if (insn & (1 << 21))
  1420. gen_op_iwmmxt_maddsq_M0_wRn(rd1);
  1421. else
  1422. gen_op_iwmmxt_madduq_M0_wRn(rd1);
  1423. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1424. gen_op_iwmmxt_set_mup();
  1425. break;
  1426. case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
  1427. wrd = (insn >> 12) & 0xf;
  1428. rd0 = (insn >> 16) & 0xf;
  1429. rd1 = (insn >> 0) & 0xf;
  1430. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1431. switch ((insn >> 22) & 3) {
  1432. case 0:
  1433. gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
  1434. break;
  1435. case 1:
  1436. gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
  1437. break;
  1438. case 2:
  1439. gen_op_iwmmxt_unpackll_M0_wRn(rd1);
  1440. break;
  1441. case 3:
  1442. return 1;
  1443. }
  1444. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1445. gen_op_iwmmxt_set_mup();
  1446. gen_op_iwmmxt_set_cup();
  1447. break;
  1448. case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
  1449. wrd = (insn >> 12) & 0xf;
  1450. rd0 = (insn >> 16) & 0xf;
  1451. rd1 = (insn >> 0) & 0xf;
  1452. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1453. switch ((insn >> 22) & 3) {
  1454. case 0:
  1455. gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
  1456. break;
  1457. case 1:
  1458. gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
  1459. break;
  1460. case 2:
  1461. gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
  1462. break;
  1463. case 3:
  1464. return 1;
  1465. }
  1466. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1467. gen_op_iwmmxt_set_mup();
  1468. gen_op_iwmmxt_set_cup();
  1469. break;
  1470. case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
  1471. wrd = (insn >> 12) & 0xf;
  1472. rd0 = (insn >> 16) & 0xf;
  1473. rd1 = (insn >> 0) & 0xf;
  1474. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1475. if (insn & (1 << 22))
  1476. gen_op_iwmmxt_sadw_M0_wRn(rd1);
  1477. else
  1478. gen_op_iwmmxt_sadb_M0_wRn(rd1);
  1479. if (!(insn & (1 << 20)))
  1480. gen_op_iwmmxt_addl_M0_wRn(wrd);
  1481. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1482. gen_op_iwmmxt_set_mup();
  1483. break;
  1484. case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
  1485. wrd = (insn >> 12) & 0xf;
  1486. rd0 = (insn >> 16) & 0xf;
  1487. rd1 = (insn >> 0) & 0xf;
  1488. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1489. if (insn & (1 << 21)) {
  1490. if (insn & (1 << 20))
  1491. gen_op_iwmmxt_mulshw_M0_wRn(rd1);
  1492. else
  1493. gen_op_iwmmxt_mulslw_M0_wRn(rd1);
  1494. } else {
  1495. if (insn & (1 << 20))
  1496. gen_op_iwmmxt_muluhw_M0_wRn(rd1);
  1497. else
  1498. gen_op_iwmmxt_mululw_M0_wRn(rd1);
  1499. }
  1500. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1501. gen_op_iwmmxt_set_mup();
  1502. break;
  1503. case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
  1504. wrd = (insn >> 12) & 0xf;
  1505. rd0 = (insn >> 16) & 0xf;
  1506. rd1 = (insn >> 0) & 0xf;
  1507. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1508. if (insn & (1 << 21))
  1509. gen_op_iwmmxt_macsw_M0_wRn(rd1);
  1510. else
  1511. gen_op_iwmmxt_macuw_M0_wRn(rd1);
  1512. if (!(insn & (1 << 20))) {
  1513. iwmmxt_load_reg(cpu_V1, wrd);
  1514. tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
  1515. }
  1516. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1517. gen_op_iwmmxt_set_mup();
  1518. break;
  1519. case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
  1520. wrd = (insn >> 12) & 0xf;
  1521. rd0 = (insn >> 16) & 0xf;
  1522. rd1 = (insn >> 0) & 0xf;
  1523. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1524. switch ((insn >> 22) & 3) {
  1525. case 0:
  1526. gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
  1527. break;
  1528. case 1:
  1529. gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
  1530. break;
  1531. case 2:
  1532. gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
  1533. break;
  1534. case 3:
  1535. return 1;
  1536. }
  1537. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1538. gen_op_iwmmxt_set_mup();
  1539. gen_op_iwmmxt_set_cup();
  1540. break;
  1541. case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
  1542. wrd = (insn >> 12) & 0xf;
  1543. rd0 = (insn >> 16) & 0xf;
  1544. rd1 = (insn >> 0) & 0xf;
  1545. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1546. if (insn & (1 << 22)) {
  1547. if (insn & (1 << 20))
  1548. gen_op_iwmmxt_avgw1_M0_wRn(rd1);
  1549. else
  1550. gen_op_iwmmxt_avgw0_M0_wRn(rd1);
  1551. } else {
  1552. if (insn & (1 << 20))
  1553. gen_op_iwmmxt_avgb1_M0_wRn(rd1);
  1554. else
  1555. gen_op_iwmmxt_avgb0_M0_wRn(rd1);
  1556. }
  1557. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1558. gen_op_iwmmxt_set_mup();
  1559. gen_op_iwmmxt_set_cup();
  1560. break;
  1561. case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
  1562. wrd = (insn >> 12) & 0xf;
  1563. rd0 = (insn >> 16) & 0xf;
  1564. rd1 = (insn >> 0) & 0xf;
  1565. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1566. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
  1567. tcg_gen_andi_i32(tmp, tmp, 7);
  1568. iwmmxt_load_reg(cpu_V1, rd1);
  1569. gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
  1570. tcg_temp_free_i32(tmp);
  1571. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1572. gen_op_iwmmxt_set_mup();
  1573. break;
  1574. case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
  1575. if (((insn >> 6) & 3) == 3)
  1576. return 1;
  1577. rd = (insn >> 12) & 0xf;
  1578. wrd = (insn >> 16) & 0xf;
  1579. tmp = load_reg(s, rd);
  1580. gen_op_iwmmxt_movq_M0_wRn(wrd);
  1581. switch ((insn >> 6) & 3) {
  1582. case 0:
  1583. tmp2 = tcg_const_i32(0xff);
  1584. tmp3 = tcg_const_i32((insn & 7) << 3);
  1585. break;
  1586. case 1:
  1587. tmp2 = tcg_const_i32(0xffff);
  1588. tmp3 = tcg_const_i32((insn & 3) << 4);
  1589. break;
  1590. case 2:
  1591. tmp2 = tcg_const_i32(0xffffffff);
  1592. tmp3 = tcg_const_i32((insn & 1) << 5);
  1593. break;
  1594. default:
  1595. tmp2 = NULL;
  1596. tmp3 = NULL;
  1597. }
  1598. gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
  1599. tcg_temp_free_i32(tmp3);
  1600. tcg_temp_free_i32(tmp2);
  1601. tcg_temp_free_i32(tmp);
  1602. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1603. gen_op_iwmmxt_set_mup();
  1604. break;
  1605. case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
  1606. rd = (insn >> 12) & 0xf;
  1607. wrd = (insn >> 16) & 0xf;
  1608. if (rd == 15 || ((insn >> 22) & 3) == 3)
  1609. return 1;
  1610. gen_op_iwmmxt_movq_M0_wRn(wrd);
  1611. tmp = tcg_temp_new_i32();
  1612. switch ((insn >> 22) & 3) {
  1613. case 0:
  1614. tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
  1615. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1616. if (insn & 8) {
  1617. tcg_gen_ext8s_i32(tmp, tmp);
  1618. } else {
  1619. tcg_gen_andi_i32(tmp, tmp, 0xff);
  1620. }
  1621. break;
  1622. case 1:
  1623. tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
  1624. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1625. if (insn & 8) {
  1626. tcg_gen_ext16s_i32(tmp, tmp);
  1627. } else {
  1628. tcg_gen_andi_i32(tmp, tmp, 0xffff);
  1629. }
  1630. break;
  1631. case 2:
  1632. tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
  1633. tcg_gen_extrl_i64_i32(tmp, cpu_M0);
  1634. break;
  1635. }
  1636. store_reg(s, rd, tmp);
  1637. break;
  1638. case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
  1639. if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
  1640. return 1;
  1641. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1642. switch ((insn >> 22) & 3) {
  1643. case 0:
  1644. tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
  1645. break;
  1646. case 1:
  1647. tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
  1648. break;
  1649. case 2:
  1650. tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
  1651. break;
  1652. }
  1653. tcg_gen_shli_i32(tmp, tmp, 28);
  1654. gen_set_nzcv(tmp);
  1655. tcg_temp_free_i32(tmp);
  1656. break;
  1657. case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
  1658. if (((insn >> 6) & 3) == 3)
  1659. return 1;
  1660. rd = (insn >> 12) & 0xf;
  1661. wrd = (insn >> 16) & 0xf;
  1662. tmp = load_reg(s, rd);
  1663. switch ((insn >> 6) & 3) {
  1664. case 0:
  1665. gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
  1666. break;
  1667. case 1:
  1668. gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
  1669. break;
  1670. case 2:
  1671. gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
  1672. break;
  1673. }
  1674. tcg_temp_free_i32(tmp);
  1675. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1676. gen_op_iwmmxt_set_mup();
  1677. break;
  1678. case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
  1679. if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
  1680. return 1;
  1681. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1682. tmp2 = tcg_temp_new_i32();
  1683. tcg_gen_mov_i32(tmp2, tmp);
  1684. switch ((insn >> 22) & 3) {
  1685. case 0:
  1686. for (i = 0; i < 7; i ++) {
  1687. tcg_gen_shli_i32(tmp2, tmp2, 4);
  1688. tcg_gen_and_i32(tmp, tmp, tmp2);
  1689. }
  1690. break;
  1691. case 1:
  1692. for (i = 0; i < 3; i ++) {
  1693. tcg_gen_shli_i32(tmp2, tmp2, 8);
  1694. tcg_gen_and_i32(tmp, tmp, tmp2);
  1695. }
  1696. break;
  1697. case 2:
  1698. tcg_gen_shli_i32(tmp2, tmp2, 16);
  1699. tcg_gen_and_i32(tmp, tmp, tmp2);
  1700. break;
  1701. }
  1702. gen_set_nzcv(tmp);
  1703. tcg_temp_free_i32(tmp2);
  1704. tcg_temp_free_i32(tmp);
  1705. break;
  1706. case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
  1707. wrd = (insn >> 12) & 0xf;
  1708. rd0 = (insn >> 16) & 0xf;
  1709. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1710. switch ((insn >> 22) & 3) {
  1711. case 0:
  1712. gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
  1713. break;
  1714. case 1:
  1715. gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
  1716. break;
  1717. case 2:
  1718. gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
  1719. break;
  1720. case 3:
  1721. return 1;
  1722. }
  1723. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1724. gen_op_iwmmxt_set_mup();
  1725. break;
  1726. case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
  1727. if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
  1728. return 1;
  1729. tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1730. tmp2 = tcg_temp_new_i32();
  1731. tcg_gen_mov_i32(tmp2, tmp);
  1732. switch ((insn >> 22) & 3) {
  1733. case 0:
  1734. for (i = 0; i < 7; i ++) {
  1735. tcg_gen_shli_i32(tmp2, tmp2, 4);
  1736. tcg_gen_or_i32(tmp, tmp, tmp2);
  1737. }
  1738. break;
  1739. case 1:
  1740. for (i = 0; i < 3; i ++) {
  1741. tcg_gen_shli_i32(tmp2, tmp2, 8);
  1742. tcg_gen_or_i32(tmp, tmp, tmp2);
  1743. }
  1744. break;
  1745. case 2:
  1746. tcg_gen_shli_i32(tmp2, tmp2, 16);
  1747. tcg_gen_or_i32(tmp, tmp, tmp2);
  1748. break;
  1749. }
  1750. gen_set_nzcv(tmp);
  1751. tcg_temp_free_i32(tmp2);
  1752. tcg_temp_free_i32(tmp);
  1753. break;
  1754. case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
  1755. rd = (insn >> 12) & 0xf;
  1756. rd0 = (insn >> 16) & 0xf;
  1757. if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
  1758. return 1;
  1759. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1760. tmp = tcg_temp_new_i32();
  1761. switch ((insn >> 22) & 3) {
  1762. case 0:
  1763. gen_helper_iwmmxt_msbb(tmp, cpu_M0);
  1764. break;
  1765. case 1:
  1766. gen_helper_iwmmxt_msbw(tmp, cpu_M0);
  1767. break;
  1768. case 2:
  1769. gen_helper_iwmmxt_msbl(tmp, cpu_M0);
  1770. break;
  1771. }
  1772. store_reg(s, rd, tmp);
  1773. break;
  1774. case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
  1775. case 0x906: case 0xb06: case 0xd06: case 0xf06:
  1776. wrd = (insn >> 12) & 0xf;
  1777. rd0 = (insn >> 16) & 0xf;
  1778. rd1 = (insn >> 0) & 0xf;
  1779. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1780. switch ((insn >> 22) & 3) {
  1781. case 0:
  1782. if (insn & (1 << 21))
  1783. gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
  1784. else
  1785. gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
  1786. break;
  1787. case 1:
  1788. if (insn & (1 << 21))
  1789. gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
  1790. else
  1791. gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
  1792. break;
  1793. case 2:
  1794. if (insn & (1 << 21))
  1795. gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
  1796. else
  1797. gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
  1798. break;
  1799. case 3:
  1800. return 1;
  1801. }
  1802. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1803. gen_op_iwmmxt_set_mup();
  1804. gen_op_iwmmxt_set_cup();
  1805. break;
  1806. case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
  1807. case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
  1808. wrd = (insn >> 12) & 0xf;
  1809. rd0 = (insn >> 16) & 0xf;
  1810. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1811. switch ((insn >> 22) & 3) {
  1812. case 0:
  1813. if (insn & (1 << 21))
  1814. gen_op_iwmmxt_unpacklsb_M0();
  1815. else
  1816. gen_op_iwmmxt_unpacklub_M0();
  1817. break;
  1818. case 1:
  1819. if (insn & (1 << 21))
  1820. gen_op_iwmmxt_unpacklsw_M0();
  1821. else
  1822. gen_op_iwmmxt_unpackluw_M0();
  1823. break;
  1824. case 2:
  1825. if (insn & (1 << 21))
  1826. gen_op_iwmmxt_unpacklsl_M0();
  1827. else
  1828. gen_op_iwmmxt_unpacklul_M0();
  1829. break;
  1830. case 3:
  1831. return 1;
  1832. }
  1833. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1834. gen_op_iwmmxt_set_mup();
  1835. gen_op_iwmmxt_set_cup();
  1836. break;
  1837. case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
  1838. case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
  1839. wrd = (insn >> 12) & 0xf;
  1840. rd0 = (insn >> 16) & 0xf;
  1841. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1842. switch ((insn >> 22) & 3) {
  1843. case 0:
  1844. if (insn & (1 << 21))
  1845. gen_op_iwmmxt_unpackhsb_M0();
  1846. else
  1847. gen_op_iwmmxt_unpackhub_M0();
  1848. break;
  1849. case 1:
  1850. if (insn & (1 << 21))
  1851. gen_op_iwmmxt_unpackhsw_M0();
  1852. else
  1853. gen_op_iwmmxt_unpackhuw_M0();
  1854. break;
  1855. case 2:
  1856. if (insn & (1 << 21))
  1857. gen_op_iwmmxt_unpackhsl_M0();
  1858. else
  1859. gen_op_iwmmxt_unpackhul_M0();
  1860. break;
  1861. case 3:
  1862. return 1;
  1863. }
  1864. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1865. gen_op_iwmmxt_set_mup();
  1866. gen_op_iwmmxt_set_cup();
  1867. break;
  1868. case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
  1869. case 0x214: case 0x614: case 0xa14: case 0xe14:
  1870. if (((insn >> 22) & 3) == 0)
  1871. return 1;
  1872. wrd = (insn >> 12) & 0xf;
  1873. rd0 = (insn >> 16) & 0xf;
  1874. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1875. tmp = tcg_temp_new_i32();
  1876. if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  1877. tcg_temp_free_i32(tmp);
  1878. return 1;
  1879. }
  1880. switch ((insn >> 22) & 3) {
  1881. case 1:
  1882. gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
  1883. break;
  1884. case 2:
  1885. gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
  1886. break;
  1887. case 3:
  1888. gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
  1889. break;
  1890. }
  1891. tcg_temp_free_i32(tmp);
  1892. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1893. gen_op_iwmmxt_set_mup();
  1894. gen_op_iwmmxt_set_cup();
  1895. break;
  1896. case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
  1897. case 0x014: case 0x414: case 0x814: case 0xc14:
  1898. if (((insn >> 22) & 3) == 0)
  1899. return 1;
  1900. wrd = (insn >> 12) & 0xf;
  1901. rd0 = (insn >> 16) & 0xf;
  1902. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1903. tmp = tcg_temp_new_i32();
  1904. if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  1905. tcg_temp_free_i32(tmp);
  1906. return 1;
  1907. }
  1908. switch ((insn >> 22) & 3) {
  1909. case 1:
  1910. gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
  1911. break;
  1912. case 2:
  1913. gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
  1914. break;
  1915. case 3:
  1916. gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
  1917. break;
  1918. }
  1919. tcg_temp_free_i32(tmp);
  1920. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1921. gen_op_iwmmxt_set_mup();
  1922. gen_op_iwmmxt_set_cup();
  1923. break;
  1924. case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
  1925. case 0x114: case 0x514: case 0x914: case 0xd14:
  1926. if (((insn >> 22) & 3) == 0)
  1927. return 1;
  1928. wrd = (insn >> 12) & 0xf;
  1929. rd0 = (insn >> 16) & 0xf;
  1930. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1931. tmp = tcg_temp_new_i32();
  1932. if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  1933. tcg_temp_free_i32(tmp);
  1934. return 1;
  1935. }
  1936. switch ((insn >> 22) & 3) {
  1937. case 1:
  1938. gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
  1939. break;
  1940. case 2:
  1941. gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
  1942. break;
  1943. case 3:
  1944. gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
  1945. break;
  1946. }
  1947. tcg_temp_free_i32(tmp);
  1948. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1949. gen_op_iwmmxt_set_mup();
  1950. gen_op_iwmmxt_set_cup();
  1951. break;
  1952. case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
  1953. case 0x314: case 0x714: case 0xb14: case 0xf14:
  1954. if (((insn >> 22) & 3) == 0)
  1955. return 1;
  1956. wrd = (insn >> 12) & 0xf;
  1957. rd0 = (insn >> 16) & 0xf;
  1958. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1959. tmp = tcg_temp_new_i32();
  1960. switch ((insn >> 22) & 3) {
  1961. case 1:
  1962. if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
  1963. tcg_temp_free_i32(tmp);
  1964. return 1;
  1965. }
  1966. gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
  1967. break;
  1968. case 2:
  1969. if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
  1970. tcg_temp_free_i32(tmp);
  1971. return 1;
  1972. }
  1973. gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
  1974. break;
  1975. case 3:
  1976. if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
  1977. tcg_temp_free_i32(tmp);
  1978. return 1;
  1979. }
  1980. gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
  1981. break;
  1982. }
  1983. tcg_temp_free_i32(tmp);
  1984. gen_op_iwmmxt_movq_wRn_M0(wrd);
  1985. gen_op_iwmmxt_set_mup();
  1986. gen_op_iwmmxt_set_cup();
  1987. break;
  1988. case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
  1989. case 0x916: case 0xb16: case 0xd16: case 0xf16:
  1990. wrd = (insn >> 12) & 0xf;
  1991. rd0 = (insn >> 16) & 0xf;
  1992. rd1 = (insn >> 0) & 0xf;
  1993. gen_op_iwmmxt_movq_M0_wRn(rd0);
  1994. switch ((insn >> 22) & 3) {
  1995. case 0:
  1996. if (insn & (1 << 21))
  1997. gen_op_iwmmxt_minsb_M0_wRn(rd1);
  1998. else
  1999. gen_op_iwmmxt_minub_M0_wRn(rd1);
  2000. break;
  2001. case 1:
  2002. if (insn & (1 << 21))
  2003. gen_op_iwmmxt_minsw_M0_wRn(rd1);
  2004. else
  2005. gen_op_iwmmxt_minuw_M0_wRn(rd1);
  2006. break;
  2007. case 2:
  2008. if (insn & (1 << 21))
  2009. gen_op_iwmmxt_minsl_M0_wRn(rd1);
  2010. else
  2011. gen_op_iwmmxt_minul_M0_wRn(rd1);
  2012. break;
  2013. case 3:
  2014. return 1;
  2015. }
  2016. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2017. gen_op_iwmmxt_set_mup();
  2018. break;
  2019. case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
  2020. case 0x816: case 0xa16: case 0xc16: case 0xe16:
  2021. wrd = (insn >> 12) & 0xf;
  2022. rd0 = (insn >> 16) & 0xf;
  2023. rd1 = (insn >> 0) & 0xf;
  2024. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2025. switch ((insn >> 22) & 3) {
  2026. case 0:
  2027. if (insn & (1 << 21))
  2028. gen_op_iwmmxt_maxsb_M0_wRn(rd1);
  2029. else
  2030. gen_op_iwmmxt_maxub_M0_wRn(rd1);
  2031. break;
  2032. case 1:
  2033. if (insn & (1 << 21))
  2034. gen_op_iwmmxt_maxsw_M0_wRn(rd1);
  2035. else
  2036. gen_op_iwmmxt_maxuw_M0_wRn(rd1);
  2037. break;
  2038. case 2:
  2039. if (insn & (1 << 21))
  2040. gen_op_iwmmxt_maxsl_M0_wRn(rd1);
  2041. else
  2042. gen_op_iwmmxt_maxul_M0_wRn(rd1);
  2043. break;
  2044. case 3:
  2045. return 1;
  2046. }
  2047. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2048. gen_op_iwmmxt_set_mup();
  2049. break;
  2050. case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
  2051. case 0x402: case 0x502: case 0x602: case 0x702:
  2052. wrd = (insn >> 12) & 0xf;
  2053. rd0 = (insn >> 16) & 0xf;
  2054. rd1 = (insn >> 0) & 0xf;
  2055. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2056. tmp = tcg_const_i32((insn >> 20) & 3);
  2057. iwmmxt_load_reg(cpu_V1, rd1);
  2058. gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
  2059. tcg_temp_free_i32(tmp);
  2060. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2061. gen_op_iwmmxt_set_mup();
  2062. break;
  2063. case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
  2064. case 0x41a: case 0x51a: case 0x61a: case 0x71a:
  2065. case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
  2066. case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
  2067. wrd = (insn >> 12) & 0xf;
  2068. rd0 = (insn >> 16) & 0xf;
  2069. rd1 = (insn >> 0) & 0xf;
  2070. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2071. switch ((insn >> 20) & 0xf) {
  2072. case 0x0:
  2073. gen_op_iwmmxt_subnb_M0_wRn(rd1);
  2074. break;
  2075. case 0x1:
  2076. gen_op_iwmmxt_subub_M0_wRn(rd1);
  2077. break;
  2078. case 0x3:
  2079. gen_op_iwmmxt_subsb_M0_wRn(rd1);
  2080. break;
  2081. case 0x4:
  2082. gen_op_iwmmxt_subnw_M0_wRn(rd1);
  2083. break;
  2084. case 0x5:
  2085. gen_op_iwmmxt_subuw_M0_wRn(rd1);
  2086. break;
  2087. case 0x7:
  2088. gen_op_iwmmxt_subsw_M0_wRn(rd1);
  2089. break;
  2090. case 0x8:
  2091. gen_op_iwmmxt_subnl_M0_wRn(rd1);
  2092. break;
  2093. case 0x9:
  2094. gen_op_iwmmxt_subul_M0_wRn(rd1);
  2095. break;
  2096. case 0xb:
  2097. gen_op_iwmmxt_subsl_M0_wRn(rd1);
  2098. break;
  2099. default:
  2100. return 1;
  2101. }
  2102. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2103. gen_op_iwmmxt_set_mup();
  2104. gen_op_iwmmxt_set_cup();
  2105. break;
  2106. case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
  2107. case 0x41e: case 0x51e: case 0x61e: case 0x71e:
  2108. case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
  2109. case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
  2110. wrd = (insn >> 12) & 0xf;
  2111. rd0 = (insn >> 16) & 0xf;
  2112. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2113. tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
  2114. gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
  2115. tcg_temp_free_i32(tmp);
  2116. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2117. gen_op_iwmmxt_set_mup();
  2118. gen_op_iwmmxt_set_cup();
  2119. break;
  2120. case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
  2121. case 0x418: case 0x518: case 0x618: case 0x718:
  2122. case 0x818: case 0x918: case 0xa18: case 0xb18:
  2123. case 0xc18: case 0xd18: case 0xe18: case 0xf18:
  2124. wrd = (insn >> 12) & 0xf;
  2125. rd0 = (insn >> 16) & 0xf;
  2126. rd1 = (insn >> 0) & 0xf;
  2127. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2128. switch ((insn >> 20) & 0xf) {
  2129. case 0x0:
  2130. gen_op_iwmmxt_addnb_M0_wRn(rd1);
  2131. break;
  2132. case 0x1:
  2133. gen_op_iwmmxt_addub_M0_wRn(rd1);
  2134. break;
  2135. case 0x3:
  2136. gen_op_iwmmxt_addsb_M0_wRn(rd1);
  2137. break;
  2138. case 0x4:
  2139. gen_op_iwmmxt_addnw_M0_wRn(rd1);
  2140. break;
  2141. case 0x5:
  2142. gen_op_iwmmxt_adduw_M0_wRn(rd1);
  2143. break;
  2144. case 0x7:
  2145. gen_op_iwmmxt_addsw_M0_wRn(rd1);
  2146. break;
  2147. case 0x8:
  2148. gen_op_iwmmxt_addnl_M0_wRn(rd1);
  2149. break;
  2150. case 0x9:
  2151. gen_op_iwmmxt_addul_M0_wRn(rd1);
  2152. break;
  2153. case 0xb:
  2154. gen_op_iwmmxt_addsl_M0_wRn(rd1);
  2155. break;
  2156. default:
  2157. return 1;
  2158. }
  2159. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2160. gen_op_iwmmxt_set_mup();
  2161. gen_op_iwmmxt_set_cup();
  2162. break;
  2163. case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
  2164. case 0x408: case 0x508: case 0x608: case 0x708:
  2165. case 0x808: case 0x908: case 0xa08: case 0xb08:
  2166. case 0xc08: case 0xd08: case 0xe08: case 0xf08:
  2167. if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
  2168. return 1;
  2169. wrd = (insn >> 12) & 0xf;
  2170. rd0 = (insn >> 16) & 0xf;
  2171. rd1 = (insn >> 0) & 0xf;
  2172. gen_op_iwmmxt_movq_M0_wRn(rd0);
  2173. switch ((insn >> 22) & 3) {
  2174. case 1:
  2175. if (insn & (1 << 21))
  2176. gen_op_iwmmxt_packsw_M0_wRn(rd1);
  2177. else
  2178. gen_op_iwmmxt_packuw_M0_wRn(rd1);
  2179. break;
  2180. case 2:
  2181. if (insn & (1 << 21))
  2182. gen_op_iwmmxt_packsl_M0_wRn(rd1);
  2183. else
  2184. gen_op_iwmmxt_packul_M0_wRn(rd1);
  2185. break;
  2186. case 3:
  2187. if (insn & (1 << 21))
  2188. gen_op_iwmmxt_packsq_M0_wRn(rd1);
  2189. else
  2190. gen_op_iwmmxt_packuq_M0_wRn(rd1);
  2191. break;
  2192. }
  2193. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2194. gen_op_iwmmxt_set_mup();
  2195. gen_op_iwmmxt_set_cup();
  2196. break;
  2197. case 0x201: case 0x203: case 0x205: case 0x207:
  2198. case 0x209: case 0x20b: case 0x20d: case 0x20f:
  2199. case 0x211: case 0x213: case 0x215: case 0x217:
  2200. case 0x219: case 0x21b: case 0x21d: case 0x21f:
  2201. wrd = (insn >> 5) & 0xf;
  2202. rd0 = (insn >> 12) & 0xf;
  2203. rd1 = (insn >> 0) & 0xf;
  2204. if (rd0 == 0xf || rd1 == 0xf)
  2205. return 1;
  2206. gen_op_iwmmxt_movq_M0_wRn(wrd);
  2207. tmp = load_reg(s, rd0);
  2208. tmp2 = load_reg(s, rd1);
  2209. switch ((insn >> 16) & 0xf) {
  2210. case 0x0: /* TMIA */
  2211. gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
  2212. break;
  2213. case 0x8: /* TMIAPH */
  2214. gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
  2215. break;
  2216. case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
  2217. if (insn & (1 << 16))
  2218. tcg_gen_shri_i32(tmp, tmp, 16);
  2219. if (insn & (1 << 17))
  2220. tcg_gen_shri_i32(tmp2, tmp2, 16);
  2221. gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
  2222. break;
  2223. default:
  2224. tcg_temp_free_i32(tmp2);
  2225. tcg_temp_free_i32(tmp);
  2226. return 1;
  2227. }
  2228. tcg_temp_free_i32(tmp2);
  2229. tcg_temp_free_i32(tmp);
  2230. gen_op_iwmmxt_movq_wRn_M0(wrd);
  2231. gen_op_iwmmxt_set_mup();
  2232. break;
  2233. default:
  2234. return 1;
  2235. }
  2236. return 0;
  2237. }
  2238. /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
  2239. (ie. an undefined instruction). */
  2240. static int disas_dsp_insn(DisasContext *s, uint32_t insn)
  2241. {
  2242. int acc, rd0, rd1, rdhi, rdlo;
  2243. TCGv_i32 tmp, tmp2;
  2244. if ((insn & 0x0ff00f10) == 0x0e200010) {
  2245. /* Multiply with Internal Accumulate Format */
  2246. rd0 = (insn >> 12) & 0xf;
  2247. rd1 = insn & 0xf;
  2248. acc = (insn >> 5) & 7;
  2249. if (acc != 0)
  2250. return 1;
  2251. tmp = load_reg(s, rd0);
  2252. tmp2 = load_reg(s, rd1);
  2253. switch ((insn >> 16) & 0xf) {
  2254. case 0x0: /* MIA */
  2255. gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
  2256. break;
  2257. case 0x8: /* MIAPH */
  2258. gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
  2259. break;
  2260. case 0xc: /* MIABB */
  2261. case 0xd: /* MIABT */
  2262. case 0xe: /* MIATB */
  2263. case 0xf: /* MIATT */
  2264. if (insn & (1 << 16))
  2265. tcg_gen_shri_i32(tmp, tmp, 16);
  2266. if (insn & (1 << 17))
  2267. tcg_gen_shri_i32(tmp2, tmp2, 16);
  2268. gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
  2269. break;
  2270. default:
  2271. return 1;
  2272. }
  2273. tcg_temp_free_i32(tmp2);
  2274. tcg_temp_free_i32(tmp);
  2275. gen_op_iwmmxt_movq_wRn_M0(acc);
  2276. return 0;
  2277. }
  2278. if ((insn & 0x0fe00ff8) == 0x0c400000) {
  2279. /* Internal Accumulator Access Format */
  2280. rdhi = (insn >> 16) & 0xf;
  2281. rdlo = (insn >> 12) & 0xf;
  2282. acc = insn & 7;
  2283. if (acc != 0)
  2284. return 1;
  2285. if (insn & ARM_CP_RW_BIT) { /* MRA */
  2286. iwmmxt_load_reg(cpu_V0, acc);
  2287. tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
  2288. tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
  2289. tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
  2290. } else { /* MAR */
  2291. tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
  2292. iwmmxt_store_reg(cpu_V0, acc);
  2293. }
  2294. return 0;
  2295. }
  2296. return 1;
  2297. }
/* Shift x right by n bits; a negative n means shift left by -n instead. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))

/* Extract a double-precision register number from insn: 4 bits at
 * "bigbit" plus one extra bit at "smallbit".  Without the aa32_simd_r32
 * feature only 16 D registers exist, so a set smallbit makes the caller
 * return 1 (UNDEF).
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (dc_isar_feature(aa32_simd_r32, s)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination, first and second source D-register fields. */
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
  2311. static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
  2312. {
  2313. #ifndef CONFIG_USER_ONLY
  2314. return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
  2315. ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
  2316. #else
  2317. return true;
  2318. #endif
  2319. }
/* Jump to the next TB via a runtime hash-table lookup of the current PC. */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
  2324. /* This will end the TB but doesn't guarantee we'll return to
  2325. * cpu_loop_exec. Any live exit_requests will be processed as we
  2326. * enter the next TB.
  2327. */
  2328. static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
  2329. {
  2330. if (use_goto_tb(s, dest)) {
  2331. tcg_gen_goto_tb(n);
  2332. gen_set_pc_im(s, dest);
  2333. tcg_gen_exit_tb(s->base.tb, n);
  2334. } else {
  2335. gen_set_pc_im(s, dest);
  2336. gen_goto_ptr();
  2337. }
  2338. s->base.is_jmp = DISAS_NORETURN;
  2339. }
  2340. static inline void gen_jmp (DisasContext *s, uint32_t dest)
  2341. {
  2342. if (unlikely(is_singlestepping(s))) {
  2343. /* An indirect jump so that we still trigger the debug exception. */
  2344. gen_set_pc_im(s, dest);
  2345. s->base.is_jmp = DISAS_JUMP;
  2346. } else {
  2347. gen_goto_tb(s, 0, dest);
  2348. }
  2349. }
  2350. static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
  2351. {
  2352. if (x)
  2353. tcg_gen_sari_i32(t0, t0, 16);
  2354. else
  2355. gen_sxth(t0);
  2356. if (y)
  2357. tcg_gen_sari_i32(t1, t1, 16);
  2358. else
  2359. gen_sxth(t1);
  2360. tcg_gen_mul_i32(t0, t0, t1);
  2361. }
  2362. /* Return the mask of PSR bits set by a MSR instruction. */
  2363. static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
  2364. {
  2365. uint32_t mask = 0;
  2366. if (flags & (1 << 0)) {
  2367. mask |= 0xff;
  2368. }
  2369. if (flags & (1 << 1)) {
  2370. mask |= 0xff00;
  2371. }
  2372. if (flags & (1 << 2)) {
  2373. mask |= 0xff0000;
  2374. }
  2375. if (flags & (1 << 3)) {
  2376. mask |= 0xff000000;
  2377. }
  2378. /* Mask out undefined and reserved bits. */
  2379. mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
  2380. /* Mask out execution state. */
  2381. if (!spsr) {
  2382. mask &= ~CPSR_EXEC;
  2383. }
  2384. /* Mask out privileged bits. */
  2385. if (IS_USER(s)) {
  2386. mask &= CPSR_USER;
  2387. }
  2388. return mask;
  2389. }
  2390. /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
  2391. static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
  2392. {
  2393. TCGv_i32 tmp;
  2394. if (spsr) {
  2395. /* ??? This is also undefined in system mode. */
  2396. if (IS_USER(s))
  2397. return 1;
  2398. tmp = load_cpu_field(spsr);
  2399. tcg_gen_andi_i32(tmp, tmp, ~mask);
  2400. tcg_gen_andi_i32(t0, t0, mask);
  2401. tcg_gen_or_i32(tmp, tmp, t0);
  2402. store_cpu_field(tmp, spsr);
  2403. } else {
  2404. gen_set_cpsr(t0, mask);
  2405. }
  2406. tcg_temp_free_i32(t0);
  2407. gen_lookup_tb(s);
  2408. return 0;
  2409. }
  2410. /* Returns nonzero if access to the PSR is not permitted. */
  2411. static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
  2412. {
  2413. TCGv_i32 tmp;
  2414. tmp = tcg_temp_new_i32();
  2415. tcg_gen_movi_i32(tmp, val);
  2416. return gen_set_psr(s, mask, spsr, tmp);
  2417. }
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        /* For the paired encodings below, the low bit of sysm picks
         * r13 (set) or r14 (clear).
         */
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_uncategorized(), exc_target);
    return false;
}
  2561. static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
  2562. {
  2563. TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
  2564. int tgtmode = 0, regno = 0;
  2565. if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
  2566. return;
  2567. }
  2568. /* Sync state because msr_banked() can raise exceptions */
  2569. gen_set_condexec(s);
  2570. gen_set_pc_im(s, s->pc_curr);
  2571. tcg_reg = load_reg(s, rn);
  2572. tcg_tgtmode = tcg_const_i32(tgtmode);
  2573. tcg_regno = tcg_const_i32(regno);
  2574. gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
  2575. tcg_temp_free_i32(tcg_tgtmode);
  2576. tcg_temp_free_i32(tcg_regno);
  2577. tcg_temp_free_i32(tcg_reg);
  2578. s->base.is_jmp = DISAS_UPDATE_EXIT;
  2579. }
  2580. static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
  2581. {
  2582. TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
  2583. int tgtmode = 0, regno = 0;
  2584. if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
  2585. return;
  2586. }
  2587. /* Sync state because mrs_banked() can raise exceptions */
  2588. gen_set_condexec(s);
  2589. gen_set_pc_im(s, s->pc_curr);
  2590. tcg_reg = tcg_temp_new_i32();
  2591. tcg_tgtmode = tcg_const_i32(tgtmode);
  2592. tcg_regno = tcg_const_i32(regno);
  2593. gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
  2594. tcg_temp_free_i32(tcg_tgtmode);
  2595. tcg_temp_free_i32(tcg_regno);
  2596. store_reg(s, rn, tcg_reg);
  2597. s->base.is_jmp = DISAS_UPDATE_EXIT;
  2598. }
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 * Marks pc as dead.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        /* The eret helper may touch timers; bracket it for icount. */
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
/* Generate an old-style exception return. Marks pc as dead.
 * The CPSR is restored from the current mode's SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
/* Expand a 3-operand gvec helper that additionally takes a pointer to
 * vfp.qc (the cumulative saturation flag) as its extra argument.
 */
static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
                            uint32_t opr_sz, uint32_t max_sz,
                            gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
                       opr_sz, max_sz, 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
  2639. void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
  2640. uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
  2641. {
  2642. static gen_helper_gvec_3_ptr * const fns[2] = {
  2643. gen_helper_gvec_qrdmlah_s16, gen_helper_gvec_qrdmlah_s32
  2644. };
  2645. tcg_debug_assert(vece >= 1 && vece <= 2);
  2646. gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
  2647. }
  2648. void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
  2649. uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
  2650. {
  2651. static gen_helper_gvec_3_ptr * const fns[2] = {
  2652. gen_helper_gvec_qrdmlsh_s16, gen_helper_gvec_qrdmlsh_s32
  2653. };
  2654. tcg_debug_assert(vece >= 1 && vece <= 2);
  2655. gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
  2656. }
/* Generate compare-against-zero expanders: each lane of d is set to
 * all-ones where (a COND 0) holds, else all-zeroes (setcond produces
 * 0/1, then negate).  Presumably used by the vector compare-with-zero
 * instructions -- confirm at call sites.
 */
#define GEN_CMP0(NAME, COND)                              \
    static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
    {                                                     \
        tcg_gen_setcondi_i32(COND, d, a, 0);              \
        tcg_gen_neg_i32(d, d);                            \
    }                                                     \
    static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
    {                                                     \
        tcg_gen_setcondi_i64(COND, d, a, 0);              \
        tcg_gen_neg_i64(d, d);                            \
    }                                                     \
    static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
    {                                                     \
        TCGv_vec zero = tcg_const_zeros_vec_matching(d);  \
        tcg_gen_cmp_vec(COND, vece, d, a, zero);          \
        tcg_temp_free_vec(zero);                          \
    }                                                     \
    void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
                            uint32_t opr_sz, uint32_t max_sz)      \
    {                                                     \
        const GVecGen2 op[4] = {                          \
            { .fno = gen_helper_gvec_##NAME##0_b,         \
              .fniv = gen_##NAME##0_vec,                  \
              .opt_opc = vecop_list_cmp,                  \
              .vece = MO_8 },                             \
            { .fno = gen_helper_gvec_##NAME##0_h,         \
              .fniv = gen_##NAME##0_vec,                  \
              .opt_opc = vecop_list_cmp,                  \
              .vece = MO_16 },                            \
            { .fni4 = gen_##NAME##0_i32,                  \
              .fniv = gen_##NAME##0_vec,                  \
              .opt_opc = vecop_list_cmp,                  \
              .vece = MO_32 },                            \
            { .fni8 = gen_##NAME##0_i64,                  \
              .fniv = gen_##NAME##0_vec,                  \
              .opt_opc = vecop_list_cmp,                  \
              .prefer_i64 = TCG_TARGET_REG_BITS == 64,    \
              .vece = MO_64 },                            \
        };                                                \
        tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]);  \
    }
  2698. static const TCGOpcode vecop_list_cmp[] = {
  2699. INDEX_op_cmp_vec, 0
  2700. };
  2701. GEN_CMP0(ceq, TCG_COND_EQ)
  2702. GEN_CMP0(cle, TCG_COND_LE)
  2703. GEN_CMP0(cge, TCG_COND_GE)
  2704. GEN_CMP0(clt, TCG_COND_LT)
  2705. GEN_CMP0(cgt, TCG_COND_GT)
  2706. #undef GEN_CMP0
/*
 * Signed shift-right-accumulate, per element: d += a >> shift.
 * The 8- and 16-bit forms operate on packed sub-elements within a
 * 64-bit lane; all forms use 'a' as scratch for the shifted value.
 */
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}
  2732. void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
  2733. int64_t shift, uint32_t opr_sz, uint32_t max_sz)
  2734. {
  2735. static const TCGOpcode vecop_list[] = {
  2736. INDEX_op_sari_vec, INDEX_op_add_vec, 0
  2737. };
  2738. static const GVecGen2i ops[4] = {
  2739. { .fni8 = gen_ssra8_i64,
  2740. .fniv = gen_ssra_vec,
  2741. .fno = gen_helper_gvec_ssra_b,
  2742. .load_dest = true,
  2743. .opt_opc = vecop_list,
  2744. .vece = MO_8 },
  2745. { .fni8 = gen_ssra16_i64,
  2746. .fniv = gen_ssra_vec,
  2747. .fno = gen_helper_gvec_ssra_h,
  2748. .load_dest = true,
  2749. .opt_opc = vecop_list,
  2750. .vece = MO_16 },
  2751. { .fni4 = gen_ssra32_i32,
  2752. .fniv = gen_ssra_vec,
  2753. .fno = gen_helper_gvec_ssra_s,
  2754. .load_dest = true,
  2755. .opt_opc = vecop_list,
  2756. .vece = MO_32 },
  2757. { .fni8 = gen_ssra64_i64,
  2758. .fniv = gen_ssra_vec,
  2759. .fno = gen_helper_gvec_ssra_b,
  2760. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2761. .opt_opc = vecop_list,
  2762. .load_dest = true,
  2763. .vece = MO_64 },
  2764. };
  2765. /* tszimm encoding produces immediates in the range [1..esize]. */
  2766. tcg_debug_assert(shift > 0);
  2767. tcg_debug_assert(shift <= (8 << vece));
  2768. /*
  2769. * Shifts larger than the element size are architecturally valid.
  2770. * Signed results in all sign bits.
  2771. */
  2772. shift = MIN(shift, (8 << vece) - 1);
  2773. tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
  2774. }
/*
 * Unsigned shift-right-accumulate, per element: d += a >> shift.
 * Same structure as the ssra helpers, but with logical shifts.
 */
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}
/*
 * Expand USRA: unsigned shift right by immediate and accumulate,
 * rd[i] += rm[i] >> shift, for each element of size (8 << vece).
 */
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_usra8_i64,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8, },
        { .fni8 = gen_usra16_i64,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16, },
        { .fni4 = gen_usra32_i32,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32, },
        { .fni8 = gen_usra64_i64,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64, },
    };

    /* tszimm encoding produces immediates in the range [1..esize]. */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /*
     * Shifts larger than the element size are architecturally valid.
     * Unsigned results in all zeros as input to accumulate: nop.
     */
    if (shift < (8 << vece)) {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    } else {
        /* Nop, but we do need to clear the tail. */
        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
    }
}
/*
 * Shift one less than the requested amount, and the low bit is
 * the rounding bit. For the 8 and 16-bit operations, because we
 * mask the low bit, we can perform a normal integer shift instead
 * of a vector shift.
 */
static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    /* t = per-element rounding bit: bit (sh - 1) of a. */
    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_sar8i_i64(d, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_sar16i_i64(d, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    /* For a full-width element, extract isolates the rounding bit. */
    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_sari_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extract_i64(t, a, sh - 1, 1);
    tcg_gen_sari_i64(d, a, sh);
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec ones = tcg_temp_new_vec_matching(d);

    tcg_gen_shri_vec(vece, t, a, sh - 1);
    tcg_gen_dupi_vec(vece, ones, 1);
    tcg_gen_and_vec(vece, t, t, ones);
    tcg_gen_sari_vec(vece, d, a, sh);
    tcg_gen_add_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
    tcg_temp_free_vec(ones);
}
/*
 * Expand SRSHR: signed rounding shift right by immediate,
 * rd[i] = (rm[i] + (1 << (shift - 1))) >> shift, per element.
 */
void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_srshr8_i64,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_srshr16_i64,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_srshr32_i32,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_srshr64_i64,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize] */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    if (shift == (8 << vece)) {
        /*
         * Shifts larger than the element size are architecturally valid.
         * Signed results in all sign bits.  With rounding, this produces
         *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
         * I.e. always zero.
         */
        tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0);
    } else {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    }
}
/*
 * Signed rounding shift-right, then accumulate: d += srshr(a, sh),
 * reusing the srshr helpers for the rounded shift.
 */
static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    gen_srshr8_i64(t, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    gen_srshr16_i64(t, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_srshr32_i32(t, a, sh);
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    gen_srshr64_i64(t, a, sh);
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    gen_srshr_vec(vece, t, a, sh);
    tcg_gen_add_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}
/*
 * Expand SRSRA: signed rounding shift right and accumulate,
 * rd[i] += srshr(rm[i], shift), per element.
 */
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_srsra8_i64,
          .fniv = gen_srsra_vec,
          .fno = gen_helper_gvec_srsra_b,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_8 },
        { .fni8 = gen_srsra16_i64,
          .fniv = gen_srsra_vec,
          .fno = gen_helper_gvec_srsra_h,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_srsra32_i32,
          .fniv = gen_srsra_vec,
          .fno = gen_helper_gvec_srsra_s,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_srsra64_i64,
          .fniv = gen_srsra_vec,
          .fno = gen_helper_gvec_srsra_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize] */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /*
     * Shifts larger than the element size are architecturally valid.
     * Signed results in all sign bits.  With rounding, this produces
     *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
     * I.e. always zero.  With accumulation, this leaves D unchanged.
     */
    if (shift == (8 << vece)) {
        /* Nop, but we do need to clear the tail. */
        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    }
}
/*
 * Unsigned rounding shift right: d = (a + (1 << (sh - 1))) >> sh,
 * per element, implemented as (a >> sh) + rounding-bit as in the
 * srshr helpers above.
 */
static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    /* t = per-element rounding bit: bit (sh - 1) of a. */
    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_shr8i_i64(d, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_shr16i_i64(d, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_shri_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extract_i64(t, a, sh - 1, 1);
    tcg_gen_shri_i64(d, a, sh);
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec ones = tcg_temp_new_vec_matching(d);

    tcg_gen_shri_vec(vece, t, a, shift - 1);
    tcg_gen_dupi_vec(vece, ones, 1);
    tcg_gen_and_vec(vece, t, t, ones);
    tcg_gen_shri_vec(vece, d, a, shift);
    tcg_gen_add_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
    tcg_temp_free_vec(ones);
}
/*
 * Expand URSHR: unsigned rounding shift right by immediate,
 * rd[i] = (rm[i] + (1 << (shift - 1))) >> shift, per element.
 */
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_urshr8_i64,
          .fniv = gen_urshr_vec,
          .fno = gen_helper_gvec_urshr_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_urshr16_i64,
          .fniv = gen_urshr_vec,
          .fno = gen_helper_gvec_urshr_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_urshr32_i32,
          .fniv = gen_urshr_vec,
          .fno = gen_helper_gvec_urshr_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_urshr64_i64,
          .fniv = gen_urshr_vec,
          .fno = gen_helper_gvec_urshr_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize] */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    if (shift == (8 << vece)) {
        /*
         * Shifts larger than the element size are architecturally valid.
         * Unsigned results in zero.  With rounding, this produces a
         * copy of the most significant bit.
         */
        tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    }
}
/*
 * Unsigned rounding shift-right, then accumulate: d += urshr(a, sh).
 * When sh == esize the rounded shift degenerates to just the element's
 * most significant bit (the value bits all shift out), so that case is
 * special-cased rather than delegated to the urshr helpers.
 */
static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    if (sh == 8) {
        tcg_gen_vec_shr8i_i64(t, a, 7);
    } else {
        gen_urshr8_i64(t, a, sh);
    }
    tcg_gen_vec_add8_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    if (sh == 16) {
        tcg_gen_vec_shr16i_i64(t, a, 15);
    } else {
        gen_urshr16_i64(t, a, sh);
    }
    tcg_gen_vec_add16_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    if (sh == 32) {
        tcg_gen_shri_i32(t, a, 31);
    } else {
        gen_urshr32_i32(t, a, sh);
    }
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    if (sh == 64) {
        tcg_gen_shri_i64(t, a, 63);
    } else {
        gen_urshr64_i64(t, a, sh);
    }
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    if (sh == (8 << vece)) {
        tcg_gen_shri_vec(vece, t, a, sh - 1);
    } else {
        gen_urshr_vec(vece, t, a, sh);
    }
    tcg_gen_add_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}
/*
 * Expand URSRA: unsigned rounding shift right and accumulate,
 * rd[i] += urshr(rm[i], shift), per element.  The shift == esize
 * case is handled inside the per-element helpers, so no special
 * dispatch is needed here.
 */
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_ursra8_i64,
          .fniv = gen_ursra_vec,
          .fno = gen_helper_gvec_ursra_b,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_8 },
        { .fni8 = gen_ursra16_i64,
          .fniv = gen_ursra_vec,
          .fno = gen_helper_gvec_ursra_h,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_ursra32_i32,
          .fniv = gen_ursra_vec,
          .fno = gen_helper_gvec_ursra_s,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_ursra64_i64,
          .fniv = gen_ursra_vec,
          .fno = gen_helper_gvec_ursra_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize] */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
}
/*
 * Shift-right-and-insert: replace the low (esize - shift) bits of
 * each element of d with a >> shift, leaving d's top 'shift' bits
 * unchanged.
 */
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    /* mask covers the bits of d that receive the shifted value. */
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    /* Full-width element: deposit does the masked insert directly. */
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    /* m covers the 'sh' high bits of each element that d keeps. */
    tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}
/*
 * Expand SRI: shift right by immediate and insert into rd,
 * preserving each destination element's top 'shift' bits.
 */
void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
    const GVecGen2i ops[4] = {
        { .fni8 = gen_shr8_ins_i64,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_shr16_ins_i64,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_shr32_ins_i32,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_shr64_ins_i64,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize]. */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /* Shift of esize leaves destination unchanged. */
    if (shift < (8 << vece)) {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    } else {
        /* Nop, but we do need to clear the tail. */
        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
    }
}
/*
 * Shift-left-and-insert: replace the high (esize - shift) bits of
 * each element of d with a << shift, leaving d's low 'shift' bits
 * unchanged.
 */
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    /* mask covers the bits of d that receive the shifted value. */
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    /* Full-width element: deposit implements the masked insert. */
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_shli_vec(vece, t, a, sh);
    /* m covers the 'sh' low bits of each element that d keeps. */
    tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}
/*
 * Expand SLI: shift left by immediate and insert into rd,
 * preserving each destination element's low 'shift' bits.
 */
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
    const GVecGen2i ops[4] = {
        { .fni8 = gen_shl8_ins_i64,
          .fniv = gen_shl_ins_vec,
          .fno = gen_helper_gvec_sli_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_shl16_ins_i64,
          .fniv = gen_shl_ins_vec,
          .fno = gen_helper_gvec_sli_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_shl32_ins_i32,
          .fniv = gen_shl_ins_vec,
          .fno = gen_helper_gvec_sli_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_shl64_ins_i64,
          .fniv = gen_shl_ins_vec,
          .fno = gen_helper_gvec_sli_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [0..esize-1]. */
    tcg_debug_assert(shift >= 0);
    tcg_debug_assert(shift < (8 << vece));

    if (shift == 0) {
        /* A zero shift inserts everything: plain move. */
        tcg_gen_gvec_mov(vece, rd_ofs, rm_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    }
}
/*
 * Multiply-accumulate (d += a * b) and multiply-subtract (d -= a * b),
 * per element; 'a' is clobbered with the product.  The 8- and 16-bit
 * forms use the packed neon helpers on 32-bit lanes.
 */
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}
/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */
/* Expand MLA: rd[i] += rn[i] * rm[i], per element. */
void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_mul_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fni4 = gen_mla8_i32,
          .fniv = gen_mla_vec,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni4 = gen_mla16_i32,
          .fniv = gen_mla_vec,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_mla32_i32,
          .fniv = gen_mla_vec,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_mla64_i64,
          .fniv = gen_mla_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
/* Expand MLS: rd[i] -= rn[i] * rm[i], per element. */
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_mul_vec, INDEX_op_sub_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fni4 = gen_mls8_i32,
          .fniv = gen_mls_vec,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni4 = gen_mls16_i32,
          .fniv = gen_mls_vec,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_mls32_i32,
          .fniv = gen_mls_vec,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_mls64_i64,
          .fniv = gen_mls_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    /* Negate the 0/1 setcond result into the 0/-1 mask form. */
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    /* 'a' is reused as a zero operand for the compare. */
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
/*
 * Expand CMTST: rd[i] = (rn[i] & rm[i]) != 0 ? -1 : 0, per element.
 * The 8/16-bit inline forms reuse the packed neon tst helpers as fni4.
 */
void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_cmp_vec, 0 };
    static const GVecGen3 ops[4] = {
        { .fni4 = gen_helper_neon_tst_u8,
          .fniv = gen_cmtst_vec,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni4 = gen_helper_neon_tst_u16,
          .fniv = gen_cmtst_vec,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_cmtst_i32,
          .fniv = gen_cmtst_vec,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_cmtst_i64,
          .fniv = gen_cmtst_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
/*
 * Variable unsigned shift (USHL-style), 32-bit scalar: the shift
 * amount is the sign-extended low byte of 'shift'; negative values
 * shift right.  |amount| >= 32 yields zero, selected via movcond.
 */
void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
    TCGv_i32 lval = tcg_temp_new_i32();
    TCGv_i32 rval = tcg_temp_new_i32();
    TCGv_i32 lsh = tcg_temp_new_i32();
    TCGv_i32 rsh = tcg_temp_new_i32();
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 max = tcg_const_i32(32);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i32(lsh, shift);      /* left-shift count (signed byte) */
    tcg_gen_neg_i32(rsh, lsh);          /* right-shift count = -lsh */
    tcg_gen_shl_i32(lval, src, lsh);
    tcg_gen_shr_i32(rval, src, rsh);
    /* Pick lval if 0 <= lsh < 32, else rval if 0 <= rsh < 32, else 0. */
    tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
    tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);

    tcg_temp_free_i32(lval);
    tcg_temp_free_i32(rval);
    tcg_temp_free_i32(lsh);
    tcg_temp_free_i32(rsh);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(max);
}
/*
 * Variable unsigned shift (USHL-style), 64-bit scalar: as
 * gen_ushl_i32 but with a 64-bit element width and limit of 64.
 */
void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
{
    TCGv_i64 lval = tcg_temp_new_i64();
    TCGv_i64 rval = tcg_temp_new_i64();
    TCGv_i64 lsh = tcg_temp_new_i64();
    TCGv_i64 rsh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);
    TCGv_i64 max = tcg_const_i64(64);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i64(lsh, shift);      /* left-shift count (signed byte) */
    tcg_gen_neg_i64(rsh, lsh);          /* right-shift count = -lsh */
    tcg_gen_shl_i64(lval, src, lsh);
    tcg_gen_shr_i64(rval, src, rsh);
    /* Pick lval if 0 <= lsh < 64, else rval if 0 <= rsh < 64, else 0. */
    tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
    tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);

    tcg_temp_free_i64(lval);
    tcg_temp_free_i64(rval);
    tcg_temp_free_i64(lsh);
    tcg_temp_free_i64(rsh);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(max);
}
/*
 * Vector expansion of the shift-by-signed-count operation: each
 * element of @shift supplies a signed count in its low byte; positive
 * counts shift left, negative counts shift right, out-of-range counts
 * zero the element.
 */
static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
                         TCGv_vec src, TCGv_vec shift)
{
    TCGv_vec lval = tcg_temp_new_vec_matching(dst);
    TCGv_vec rval = tcg_temp_new_vec_matching(dst);
    TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec msk, max;

    tcg_gen_neg_vec(vece, rsh, shift);
    if (vece == MO_8) {
        /* For bytes the element is exactly the count; no masking needed. */
        tcg_gen_mov_vec(lsh, shift);
    } else {
        /* Only the low byte of each element is a shift count. */
        msk = tcg_temp_new_vec_matching(dst);
        tcg_gen_dupi_vec(vece, msk, 0xff);
        tcg_gen_and_vec(vece, lsh, shift, msk);
        tcg_gen_and_vec(vece, rsh, rsh, msk);
        tcg_temp_free_vec(msk);
    }

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_shlv_vec(vece, lval, src, lsh);
    tcg_gen_shrv_vec(vece, rval, src, rsh);

    /* max = element width in bits; counts >= max are out of range. */
    max = tcg_temp_new_vec_matching(dst);
    tcg_gen_dupi_vec(vece, max, 8 << vece);

    /*
     * The choice of LT (signed) and GEU (unsigned) are biased toward
     * the instructions of the x86_64 host.  For MO_8, the whole byte
     * is significant so we must use an unsigned compare; otherwise we
     * have already masked to a byte and so a signed compare works.
     * Other tcg hosts have a full set of comparisons and do not care.
     */
    if (vece == MO_8) {
        /* lsh/rsh become all-ones masks for out-of-range lanes. */
        tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
        tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
        tcg_gen_andc_vec(vece, lval, lval, lsh);
        tcg_gen_andc_vec(vece, rval, rval, rsh);
    } else {
        /* lsh/rsh become all-ones masks for in-range lanes. */
        tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
        tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
        tcg_gen_and_vec(vece, lval, lval, lsh);
        tcg_gen_and_vec(vece, rval, rval, rsh);
    }
    /* At most one of lval/rval is non-zero per lane; merge them. */
    tcg_gen_or_vec(vece, dst, lval, rval);

    tcg_temp_free_vec(max);
    tcg_temp_free_vec(lval);
    tcg_temp_free_vec(rval);
    tcg_temp_free_vec(lsh);
    tcg_temp_free_vec(rsh);
}
/*
 * Expand the gvec form of the unsigned shift-by-register operation,
 * selecting the per-element-size expander from the table below.
 * MO_8/MO_16 have only vector and out-of-line helper forms; MO_32 and
 * MO_64 also have scalar integer fallbacks.
 */
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_neg_vec, INDEX_op_shlv_vec,
        INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_ushl_vec,
          .fno = gen_helper_gvec_ushl_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_ushl_vec,
          .fno = gen_helper_gvec_ushl_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_ushl_i32,
          .fniv = gen_ushl_vec,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_ushl_i64,
          .fniv = gen_ushl_vec,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
/*
 * Signed variant of gen_ushl_i32: the low byte of @shift is a signed
 * count.  Positive counts shift left (out-of-range left shifts give
 * zero); negative counts arithmetic-shift right, with the count
 * clamped so an out-of-range right shift fills with the sign bit.
 */
void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
    TCGv_i32 lval = tcg_temp_new_i32();  /* left-shift candidate */
    TCGv_i32 rval = tcg_temp_new_i32();  /* right-shift candidate */
    TCGv_i32 lsh = tcg_temp_new_i32();   /* sign-extended shift count */
    TCGv_i32 rsh = tcg_temp_new_i32();   /* negated count, for right shifts */
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 max = tcg_const_i32(31);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i32(lsh, shift);
    tcg_gen_neg_i32(rsh, lsh);
    tcg_gen_shl_i32(lval, src, lsh);
    /* Clamp rsh to 31 so an out-of-range right shift yields sign-fill. */
    tcg_gen_umin_i32(rsh, rsh, max);
    tcg_gen_sar_i32(rval, src, rsh);
    /* Zero the left-shift result when lsh > 31 (or negative). */
    tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
    /* Negative count selects the right-shift result. */
    tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);

    tcg_temp_free_i32(lval);
    tcg_temp_free_i32(rval);
    tcg_temp_free_i32(lsh);
    tcg_temp_free_i32(rsh);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(max);
}
/*
 * 64-bit counterpart of gen_sshl_i32: signed shift by the count in
 * the low byte of @shift; out-of-range left shifts give zero,
 * out-of-range right shifts give sign-fill (via the clamp to 63).
 */
void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
{
    TCGv_i64 lval = tcg_temp_new_i64();  /* left-shift candidate */
    TCGv_i64 rval = tcg_temp_new_i64();  /* right-shift candidate */
    TCGv_i64 lsh = tcg_temp_new_i64();   /* sign-extended shift count */
    TCGv_i64 rsh = tcg_temp_new_i64();   /* negated count, for right shifts */
    TCGv_i64 zero = tcg_const_i64(0);
    TCGv_i64 max = tcg_const_i64(63);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i64(lsh, shift);
    tcg_gen_neg_i64(rsh, lsh);
    tcg_gen_shl_i64(lval, src, lsh);
    /* Clamp rsh to 63 so an out-of-range right shift yields sign-fill. */
    tcg_gen_umin_i64(rsh, rsh, max);
    tcg_gen_sar_i64(rval, src, rsh);
    /* Zero the left-shift result when lsh > 63 (or negative). */
    tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
    /* Negative count selects the right-shift result. */
    tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);

    tcg_temp_free_i64(lval);
    tcg_temp_free_i64(rval);
    tcg_temp_free_i64(lsh);
    tcg_temp_free_i64(rsh);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(max);
}
/*
 * Vector expansion of the signed shift-by-register operation.
 * Note that @tmp is reused for several purposes in sequence: the
 * 0xff count mask, the (elt_bits - 1) clamp/compare bound, the
 * out-of-range-left-shift mask, and finally the sign-of-count
 * selector constant.
 */
static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
                         TCGv_vec src, TCGv_vec shift)
{
    TCGv_vec lval = tcg_temp_new_vec_matching(dst);
    TCGv_vec rval = tcg_temp_new_vec_matching(dst);
    TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec tmp = tcg_temp_new_vec_matching(dst);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_neg_vec(vece, rsh, shift);
    if (vece == MO_8) {
        /* For bytes the element is exactly the count; no masking needed. */
        tcg_gen_mov_vec(lsh, shift);
    } else {
        /* Only the low byte of each element is a shift count. */
        tcg_gen_dupi_vec(vece, tmp, 0xff);
        tcg_gen_and_vec(vece, lsh, shift, tmp);
        tcg_gen_and_vec(vece, rsh, rsh, tmp);
    }

    /* Bound rsh so out of bound right shift gets -1. */
    tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
    tcg_gen_umin_vec(vece, rsh, rsh, tmp);
    /* tmp becomes the mask of lanes whose left shift is out of range. */
    tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);

    tcg_gen_shlv_vec(vece, lval, src, lsh);
    tcg_gen_sarv_vec(vece, rval, src, rsh);

    /* Select in-bound left shift. */
    tcg_gen_andc_vec(vece, lval, lval, tmp);

    /* Select between left and right shift. */
    if (vece == MO_8) {
        /* Bytes: a negative element value means a right shift. */
        tcg_gen_dupi_vec(vece, tmp, 0);
        tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
    } else {
        /* Wider: the count byte is negative iff it is below 0x80. */
        tcg_gen_dupi_vec(vece, tmp, 0x80);
        tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
    }

    tcg_temp_free_vec(lval);
    tcg_temp_free_vec(rval);
    tcg_temp_free_vec(lsh);
    tcg_temp_free_vec(rsh);
    tcg_temp_free_vec(tmp);
}
/*
 * Expand the gvec form of the signed shift-by-register operation,
 * selecting the per-element-size expander from the table below.
 */
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
        INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_sshl_vec,
          .fno = gen_helper_gvec_sshl_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_sshl_vec,
          .fno = gen_helper_gvec_sshl_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_sshl_i32,
          .fniv = gen_sshl_vec,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_sshl_i64,
          .fniv = gen_sshl_vec,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  3785. static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
  3786. TCGv_vec a, TCGv_vec b)
  3787. {
  3788. TCGv_vec x = tcg_temp_new_vec_matching(t);
  3789. tcg_gen_add_vec(vece, x, a, b);
  3790. tcg_gen_usadd_vec(vece, t, a, b);
  3791. tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
  3792. tcg_gen_or_vec(vece, sat, sat, x);
  3793. tcg_temp_free_vec(x);
  3794. }
/*
 * Expand a gvec unsigned saturating add; saturation is accumulated
 * into the QC flag word (vfp.qc), passed as the extra "write_aofs"
 * operand.
 */
void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_uqadd_vec,
          .fno = gen_helper_gvec_uqadd_b,
          .write_aofs = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_uqadd_vec,
          .fno = gen_helper_gvec_uqadd_h,
          .write_aofs = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = gen_uqadd_vec,
          .fno = gen_helper_gvec_uqadd_s,
          .write_aofs = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = gen_uqadd_vec,
          .fno = gen_helper_gvec_uqadd_d,
          .write_aofs = true,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  3826. static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
  3827. TCGv_vec a, TCGv_vec b)
  3828. {
  3829. TCGv_vec x = tcg_temp_new_vec_matching(t);
  3830. tcg_gen_add_vec(vece, x, a, b);
  3831. tcg_gen_ssadd_vec(vece, t, a, b);
  3832. tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
  3833. tcg_gen_or_vec(vece, sat, sat, x);
  3834. tcg_temp_free_vec(x);
  3835. }
/*
 * Expand a gvec signed saturating add; saturation is accumulated
 * into the QC flag word (vfp.qc), passed as the extra "write_aofs"
 * operand.
 */
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_b,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_8 },
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_h,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_16 },
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_s,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_32 },
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_d,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  3867. static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
  3868. TCGv_vec a, TCGv_vec b)
  3869. {
  3870. TCGv_vec x = tcg_temp_new_vec_matching(t);
  3871. tcg_gen_sub_vec(vece, x, a, b);
  3872. tcg_gen_ussub_vec(vece, t, a, b);
  3873. tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
  3874. tcg_gen_or_vec(vece, sat, sat, x);
  3875. tcg_temp_free_vec(x);
  3876. }
/*
 * Expand a gvec unsigned saturating subtract; saturation is
 * accumulated into the QC flag word (vfp.qc), passed as the extra
 * "write_aofs" operand.
 */
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_b,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_8 },
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_h,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_16 },
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_s,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_32 },
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_d,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  3908. static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
  3909. TCGv_vec a, TCGv_vec b)
  3910. {
  3911. TCGv_vec x = tcg_temp_new_vec_matching(t);
  3912. tcg_gen_sub_vec(vece, x, a, b);
  3913. tcg_gen_sssub_vec(vece, t, a, b);
  3914. tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
  3915. tcg_gen_or_vec(vece, sat, sat, x);
  3916. tcg_temp_free_vec(x);
  3917. }
/*
 * Expand a gvec signed saturating subtract; saturation is
 * accumulated into the QC flag word (vfp.qc), passed as the extra
 * "write_aofs" operand.
 */
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_b,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_8 },
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_h,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_16 },
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_s,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_32 },
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_d,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  3949. static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  3950. {
  3951. TCGv_i32 t = tcg_temp_new_i32();
  3952. tcg_gen_sub_i32(t, a, b);
  3953. tcg_gen_sub_i32(d, b, a);
  3954. tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t);
  3955. tcg_temp_free_i32(t);
  3956. }
  3957. static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  3958. {
  3959. TCGv_i64 t = tcg_temp_new_i64();
  3960. tcg_gen_sub_i64(t, a, b);
  3961. tcg_gen_sub_i64(d, b, a);
  3962. tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t);
  3963. tcg_temp_free_i64(t);
  3964. }
  3965. static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
  3966. {
  3967. TCGv_vec t = tcg_temp_new_vec_matching(d);
  3968. tcg_gen_smin_vec(vece, t, a, b);
  3969. tcg_gen_smax_vec(vece, d, a, b);
  3970. tcg_gen_sub_vec(vece, d, d, t);
  3971. tcg_temp_free_vec(t);
  3972. }
/* Expand a gvec signed absolute difference operation. */
void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_sabd_i32,
          .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_sabd_i64,
          .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  4002. static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  4003. {
  4004. TCGv_i32 t = tcg_temp_new_i32();
  4005. tcg_gen_sub_i32(t, a, b);
  4006. tcg_gen_sub_i32(d, b, a);
  4007. tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t);
  4008. tcg_temp_free_i32(t);
  4009. }
  4010. static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  4011. {
  4012. TCGv_i64 t = tcg_temp_new_i64();
  4013. tcg_gen_sub_i64(t, a, b);
  4014. tcg_gen_sub_i64(d, b, a);
  4015. tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t);
  4016. tcg_temp_free_i64(t);
  4017. }
  4018. static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
  4019. {
  4020. TCGv_vec t = tcg_temp_new_vec_matching(d);
  4021. tcg_gen_umin_vec(vece, t, a, b);
  4022. tcg_gen_umax_vec(vece, d, a, b);
  4023. tcg_gen_sub_vec(vece, d, d, t);
  4024. tcg_temp_free_vec(t);
  4025. }
/* Expand a gvec unsigned absolute difference operation. */
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_umin_vec, INDEX_op_umax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_uabd_i32,
          .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_uabd_i64,
          .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  4055. static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  4056. {
  4057. TCGv_i32 t = tcg_temp_new_i32();
  4058. gen_sabd_i32(t, a, b);
  4059. tcg_gen_add_i32(d, d, t);
  4060. tcg_temp_free_i32(t);
  4061. }
  4062. static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  4063. {
  4064. TCGv_i64 t = tcg_temp_new_i64();
  4065. gen_sabd_i64(t, a, b);
  4066. tcg_gen_add_i64(d, d, t);
  4067. tcg_temp_free_i64(t);
  4068. }
  4069. static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
  4070. {
  4071. TCGv_vec t = tcg_temp_new_vec_matching(d);
  4072. gen_sabd_vec(vece, t, a, b);
  4073. tcg_gen_add_vec(vece, d, d, t);
  4074. tcg_temp_free_vec(t);
  4075. }
/*
 * Expand a gvec signed absolute difference and accumulate operation;
 * load_dest is set because the destination is also an input.
 */
void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_add_vec,
        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_b,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_8 },
        { .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_h,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_saba_i32,
          .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_s,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_saba_i64,
          .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
  4110. static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  4111. {
  4112. TCGv_i32 t = tcg_temp_new_i32();
  4113. gen_uabd_i32(t, a, b);
  4114. tcg_gen_add_i32(d, d, t);
  4115. tcg_temp_free_i32(t);
  4116. }
  4117. static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  4118. {
  4119. TCGv_i64 t = tcg_temp_new_i64();
  4120. gen_uabd_i64(t, a, b);
  4121. tcg_gen_add_i64(d, d, t);
  4122. tcg_temp_free_i64(t);
  4123. }
  4124. static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
  4125. {
  4126. TCGv_vec t = tcg_temp_new_vec_matching(d);
  4127. gen_uabd_vec(vece, t, a, b);
  4128. tcg_gen_add_vec(vece, d, d, t);
  4129. tcg_temp_free_vec(t);
  4130. }
/*
 * Expand a gvec unsigned absolute difference and accumulate operation;
 * load_dest is set because the destination is also an input.
 */
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_add_vec,
        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_b,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_8 },
        { .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_h,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_uaba_i32,
          .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_s,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_uaba_i64,
          .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
/*
 * Decode and emit code for an AArch32 coprocessor instruction
 * (MRC/MCR/MRRC/MCRR and friends).  Returns 0 on success, 1 if the
 * instruction is not handled here (the caller treats that as an
 * undefined/unallocated encoding).
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* Coprocessor access disabled via CPAR? */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the encoding fields; 64-bit accesses have no crn/opc2. */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        bool need_exit_tb;

        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (s->hstr_active || ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync CPU state so the runtime check can raise an exception. */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        } else if (ri->type & ARM_CP_RAISES_EXC) {
            /*
             * The readfn or writefn might raise an exception;
             * synchronize the CPU state in case it does.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into the rt/rt2 register pair. */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrh_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Assemble the 64-bit value from the rt/rt2 register pair. */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        /* I/O operations must end the TB here (whether read or write) */
        need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
                        (ri->type & ARM_CP_IO));
        if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /*
             * A write to any coprocessor register that ends a TB
             * must rebuild the hflags for the next TB.
             */
            TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
            if (arm_dc_feature(s, ARM_FEATURE_M)) {
                gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
            } else {
                if (ri->type & ARM_CP_NEWEL) {
                    gen_helper_rebuild_hflags_a32_newel(cpu_env);
                } else {
                    gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
                }
            }
            tcg_temp_free_i32(tcg_el);
            /*
             * We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            need_exit_tb = true;
        }
        if (need_exit_tb) {
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }
    return 1;
}
  4425. /* Store a 64-bit value to a register pair. Clobbers val. */
  4426. static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
  4427. {
  4428. TCGv_i32 tmp;
  4429. tmp = tcg_temp_new_i32();
  4430. tcg_gen_extrl_i64_i32(tmp, val);
  4431. store_reg(s, rlow, tmp);
  4432. tmp = tcg_temp_new_i32();
  4433. tcg_gen_extrh_i64_i32(tmp, val);
  4434. store_reg(s, rhigh, tmp);
  4435. }
  4436. /* load and add a 64-bit value from a register pair. */
  4437. static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
  4438. {
  4439. TCGv_i64 tmp;
  4440. TCGv_i32 tmpl;
  4441. TCGv_i32 tmph;
  4442. /* Load 64-bit value rd:rn. */
  4443. tmpl = load_reg(s, rlow);
  4444. tmph = load_reg(s, rhigh);
  4445. tmp = tcg_temp_new_i64();
  4446. tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
  4447. tcg_temp_free_i32(tmpl);
  4448. tcg_temp_free_i32(tmph);
  4449. tcg_gen_add_i64(val, val, tmp);
  4450. tcg_temp_free_i64(tmp);
  4451. }
  4452. /* Set N and Z flags from hi|lo. */
  4453. static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
  4454. {
  4455. tcg_gen_mov_i32(cpu_NF, hi);
  4456. tcg_gen_or_i32(cpu_ZF, lo, hi);
  4457. }
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */

/*
 * Emit code for LDREX{,B,H,D}: load from @addr into rt (and rt2 when
 * size == 3), recording the address and loaded value in
 * cpu_exclusive_addr / cpu_exclusive_val for the matching STREX.
 * @size is log2 of the access size.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    MemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        /* Remember the full 64-bit value for the STREXD comparison. */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Zero-extend the loaded value into the 64-bit monitor value. */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the monitor on this address. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
/* CLREX: clear the exclusive monitor by storing the sentinel address -1. */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/*
 * Emit code for STREX{,B,H,D}: conditionally store rt (and rt2 when
 * size == 3) to @addr, writing 0 to rd on success and 1 on failure.
 * The address check is explicit; the value check rides on the cmpxchg.
 * @size is log2 of the access size.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    MemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    /* Fail immediately if the monitored address does not match. */
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* Atomically store n64 iff memory still holds the remembered
         * value; o64 receives the value that was actually in memory.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* t0 = 1 if the comparison (and hence the store) failed. */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        /* 32-bit (or narrower) variant of the same cmpxchg dance. */
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the monitor is now disarmed. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the target mode number per the rules above. */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        unallocated_encoding(s);
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from the banked SP to the first word we store. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR, in consecutive words. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Compute the final SP; addr currently points at the second
         * stored word, hence these offsets differ from the ones above.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE_EXIT;
}
  4692. /* Generate a label used for skipping this instruction */
  4693. static void arm_gen_condlabel(DisasContext *s)
  4694. {
  4695. if (!s->condjmp) {
  4696. s->condlabel = gen_new_label();
  4697. s->condjmp = 1;
  4698. }
  4699. }
  4700. /* Skip this instruction if the ARM condition is false */
  4701. static void arm_skip_unless(DisasContext *s, uint32_t cond)
  4702. {
  4703. arm_gen_condlabel(s);
  4704. arm_gen_test_cc(cond ^ 1, s->condlabel);
  4705. }
  4706. /*
  4707. * Constant expanders for the decoders.
  4708. */
  4709. static int negate(DisasContext *s, int x)
  4710. {
  4711. return -x;
  4712. }
  4713. static int plus_2(DisasContext *s, int x)
  4714. {
  4715. return x + 2;
  4716. }
  4717. static int times_2(DisasContext *s, int x)
  4718. {
  4719. return x * 2;
  4720. }
  4721. static int times_4(DisasContext *s, int x)
  4722. {
  4723. return x * 4;
  4724. }
  4725. /* Return only the rotation part of T32ExpandImm. */
  4726. static int t32_expandimm_rot(DisasContext *s, int x)
  4727. {
  4728. return x & 0xc00 ? extract32(x, 7, 5) : 0;
  4729. }
  4730. /* Return the unrotated immediate from T32ExpandImm. */
  4731. static int t32_expandimm_imm(DisasContext *s, int x)
  4732. {
  4733. int imm = extract32(x, 0, 8);
  4734. switch (extract32(x, 8, 4)) {
  4735. case 0: /* XY */
  4736. /* Nothing to do. */
  4737. break;
  4738. case 1: /* 00XY00XY */
  4739. imm *= 0x00010001;
  4740. break;
  4741. case 2: /* XY00XY00 */
  4742. imm *= 0x01000100;
  4743. break;
  4744. case 3: /* XYXYXYXY */
  4745. imm *= 0x01010101;
  4746. break;
  4747. default:
  4748. /* Rotated constant. */
  4749. imm |= 0x80;
  4750. break;
  4751. }
  4752. return imm;
  4753. }
  4754. static int t32_branch24(DisasContext *s, int x)
  4755. {
  4756. /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
  4757. x ^= !(x < 0) * (3 << 21);
  4758. /* Append the final zero. */
  4759. return x << 1;
  4760. }
  4761. static int t16_setflags(DisasContext *s)
  4762. {
  4763. return s->condexec_mask == 0;
  4764. }
  4765. static int t16_push_list(DisasContext *s, int x)
  4766. {
  4767. return (x & 0xff) | (x & 0x100) << (14 - 8);
  4768. }
  4769. static int t16_pop_list(DisasContext *s, int x)
  4770. {
  4771. return (x & 0xff) | (x & 0x100) << (15 - 8);
  4772. }
  4773. /*
  4774. * Include the generated decoders.
  4775. */
  4776. #include "decode-a32.c.inc"
  4777. #include "decode-a32-uncond.c.inc"
  4778. #include "decode-t32.c.inc"
  4779. #include "decode-t16.c.inc"
/* Helpers to swap operands for reverse-subtract. */

/* RSB: dst = b - a (operands swapped relative to SUB). */
static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_sub_i32(dst, b, a);
}
/* RSBS: dst = b - a, setting NZCV. */
static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_CC(dst, b, a);
}
/* RSC: dest = b - a - !carry. */
static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_carry(dest, b, a);
}
/* RSCS: dest = b - a - !carry, setting NZCV. */
static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sbc_CC(dest, b, a);
}
  4797. /*
  4798. * Helpers for the data processing routines.
  4799. *
  4800. * After the computation store the results back.
  4801. * This may be suppressed altogether (STREG_NONE), require a runtime
  4802. * check against the stack limits (STREG_SP_CHECK), or generate an
  4803. * exception return. Oh, or store into a register.
  4804. *
  4805. * Always return true, indicating success for a trans_* function.
  4806. */
typedef enum {
    STREG_NONE,     /* discard the result (compare/test forms) */
    STREG_NORMAL,   /* plain register write (BX-style for r15 in a32) */
    STREG_SP_CHECK, /* write via store_sp_checked() */
    STREG_EXC_RET,  /* exception return via gen_exception_return() */
} StoreRegKind;
  4813. static bool store_reg_kind(DisasContext *s, int rd,
  4814. TCGv_i32 val, StoreRegKind kind)
  4815. {
  4816. switch (kind) {
  4817. case STREG_NONE:
  4818. tcg_temp_free_i32(val);
  4819. return true;
  4820. case STREG_NORMAL:
  4821. /* See ALUWritePC: Interworking only from a32 mode. */
  4822. if (s->thumb) {
  4823. store_reg(s, rd, val);
  4824. } else {
  4825. store_reg_bx(s, rd, val);
  4826. }
  4827. return true;
  4828. case STREG_SP_CHECK:
  4829. store_sp_checked(s, val);
  4830. return true;
  4831. case STREG_EXC_RET:
  4832. gen_exception_return(s, val);
  4833. return true;
  4834. }
  4835. g_assert_not_reached();
  4836. }
  4837. /*
  4838. * Data Processing (register)
  4839. *
  4840. * Operate, with set flags, one register source,
  4841. * one immediate shifted register source, and a destination.
  4842. */
  4843. static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
  4844. void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
  4845. int logic_cc, StoreRegKind kind)
  4846. {
  4847. TCGv_i32 tmp1, tmp2;
  4848. tmp2 = load_reg(s, a->rm);
  4849. gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
  4850. tmp1 = load_reg(s, a->rn);
  4851. gen(tmp1, tmp1, tmp2);
  4852. tcg_temp_free_i32(tmp2);
  4853. if (logic_cc) {
  4854. gen_logic_CC(tmp1);
  4855. }
  4856. return store_reg_kind(s, a->rd, tmp1, kind);
  4857. }
  4858. static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
  4859. void (*gen)(TCGv_i32, TCGv_i32),
  4860. int logic_cc, StoreRegKind kind)
  4861. {
  4862. TCGv_i32 tmp;
  4863. tmp = load_reg(s, a->rm);
  4864. gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
  4865. gen(tmp, tmp);
  4866. if (logic_cc) {
  4867. gen_logic_CC(tmp);
  4868. }
  4869. return store_reg_kind(s, a->rd, tmp, kind);
  4870. }
  4871. /*
  4872. * Data-processing (register-shifted register)
  4873. *
  4874. * Operate, with set flags, one register source,
  4875. * one register shifted register source, and a destination.
  4876. */
  4877. static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
  4878. void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
  4879. int logic_cc, StoreRegKind kind)
  4880. {
  4881. TCGv_i32 tmp1, tmp2;
  4882. tmp1 = load_reg(s, a->rs);
  4883. tmp2 = load_reg(s, a->rm);
  4884. gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
  4885. tmp1 = load_reg(s, a->rn);
  4886. gen(tmp1, tmp1, tmp2);
  4887. tcg_temp_free_i32(tmp2);
  4888. if (logic_cc) {
  4889. gen_logic_CC(tmp1);
  4890. }
  4891. return store_reg_kind(s, a->rd, tmp1, kind);
  4892. }
  4893. static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
  4894. void (*gen)(TCGv_i32, TCGv_i32),
  4895. int logic_cc, StoreRegKind kind)
  4896. {
  4897. TCGv_i32 tmp1, tmp2;
  4898. tmp1 = load_reg(s, a->rs);
  4899. tmp2 = load_reg(s, a->rm);
  4900. gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
  4901. gen(tmp2, tmp2);
  4902. if (logic_cc) {
  4903. gen_logic_CC(tmp2);
  4904. }
  4905. return store_reg_kind(s, a->rd, tmp2, kind);
  4906. }
  4907. /*
  4908. * Data-processing (immediate)
  4909. *
  4910. * Operate, with set flags, one register source,
  4911. * one rotated immediate, and a destination.
  4912. *
  4913. * Note that logic_cc && a->rot setting CF based on the msb of the
  4914. * immediate is the reason why we must pass in the unrotated form
  4915. * of the immediate.
  4916. */
  4917. static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
  4918. void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
  4919. int logic_cc, StoreRegKind kind)
  4920. {
  4921. TCGv_i32 tmp1, tmp2;
  4922. uint32_t imm;
  4923. imm = ror32(a->imm, a->rot);
  4924. if (logic_cc && a->rot) {
  4925. tcg_gen_movi_i32(cpu_CF, imm >> 31);
  4926. }
  4927. tmp2 = tcg_const_i32(imm);
  4928. tmp1 = load_reg(s, a->rn);
  4929. gen(tmp1, tmp1, tmp2);
  4930. tcg_temp_free_i32(tmp2);
  4931. if (logic_cc) {
  4932. gen_logic_CC(tmp1);
  4933. }
  4934. return store_reg_kind(s, a->rd, tmp1, kind);
  4935. }
  4936. static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
  4937. void (*gen)(TCGv_i32, TCGv_i32),
  4938. int logic_cc, StoreRegKind kind)
  4939. {
  4940. TCGv_i32 tmp;
  4941. uint32_t imm;
  4942. imm = ror32(a->imm, a->rot);
  4943. if (logic_cc && a->rot) {
  4944. tcg_gen_movi_i32(cpu_CF, imm >> 31);
  4945. }
  4946. tmp = tcg_const_i32(imm);
  4947. gen(tmp, tmp);
  4948. if (logic_cc) {
  4949. gen_logic_CC(tmp);
  4950. }
  4951. return store_reg_kind(s, a->rd, tmp, kind);
  4952. }
/*
 * Expanders for the data-processing encodings.  DO_ANY3 emits the three
 * three-operand forms (imm-shifted reg, reg-shifted reg, rotated imm);
 * DO_ANY2 the two-operand forms; DO_CMP2 the flag-setting compare forms,
 * which discard the result via STREG_NONE.
 */
#define DO_ANY3(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }

#define DO_ANY2(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }

#define DO_CMP2(NAME, OP, L)                                            \
    static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a)   \
    { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
/* Logic ops set flags via gen_logic_CC when S is set. */
DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)

/* Arithmetic ops select a flag-setting generator instead. */
DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)

/* Compares always set flags and discard the result. */
DO_CMP2(TST, tcg_gen_and_i32, true)
DO_CMP2(TEQ, tcg_gen_xor_i32, true)
DO_CMP2(CMN, gen_add_CC, false)
DO_CMP2(CMP, gen_sub_CC, false)

/* ADD to SP from SP gets the checked-SP store. */
DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
        a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
/*
 * Note for the computation of StoreRegKind we return out of the
 * middle of the functions that are expanded by DO_ANY3, and that
 * we modify a->s via that parameter before it is used by OP.
 */
/* The K argument is a GCC statement expression evaluated inside the
 * expanded trans_* functions, hence the early return is legal here.
 */
DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
        ({
            StoreRegKind ret = STREG_NORMAL;
            if (a->rd == 15 && a->s) {
                /*
                 * See ALUExceptionReturn:
                 * In User mode, UNPREDICTABLE; we choose UNDEF.
                 * In Hyp mode, UNDEFINED.
                 */
                if (IS_USER(s) || s->current_el == 2) {
                    unallocated_encoding(s);
                    return true;
                }
                /* There is no writeback of nzcv to PSTATE. */
                a->s = 0;
                ret = STREG_EXC_RET;
            } else if (a->rd == 13 && a->rn == 13) {
                ret = STREG_SP_CHECK;
            }
            ret;
        }))
/* MOV uses the same statement-expression trick as SUB above to pick
 * the StoreRegKind, with exception-return handling for MOVS pc.
 */
DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
        ({
            StoreRegKind ret = STREG_NORMAL;
            if (a->rd == 15 && a->s) {
                /*
                 * See ALUExceptionReturn:
                 * In User mode, UNPREDICTABLE; we choose UNDEF.
                 * In Hyp mode, UNDEFINED.
                 */
                if (IS_USER(s) || s->current_el == 2) {
                    unallocated_encoding(s);
                    return true;
                }
                /* There is no writeback of nzcv to PSTATE. */
                a->s = 0;
                ret = STREG_EXC_RET;
            } else if (a->rd == 13) {
                ret = STREG_SP_CHECK;
            }
            ret;
        }))

DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
/*
 * ORN is only available with T32, so there is no register-shifted-register
 * form of the insn. Using the DO_ANY3 macro would create an unused function.
 */
static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
{
    return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
}
/* ORN (immediate): Rd = Rn | ~imm. */
static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
{
    return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
}
  5048. #undef DO_ANY3
  5049. #undef DO_ANY2
  5050. #undef DO_CMP2
  5051. static bool trans_ADR(DisasContext *s, arg_ri *a)
  5052. {
  5053. store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
  5054. return true;
  5055. }
  5056. static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
  5057. {
  5058. TCGv_i32 tmp;
  5059. if (!ENABLE_ARCH_6T2) {
  5060. return false;
  5061. }
  5062. tmp = tcg_const_i32(a->imm);
  5063. store_reg(s, a->rd, tmp);
  5064. return true;
  5065. }
  5066. static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
  5067. {
  5068. TCGv_i32 tmp;
  5069. if (!ENABLE_ARCH_6T2) {
  5070. return false;
  5071. }
  5072. tmp = load_reg(s, a->rd);
  5073. tcg_gen_ext16u_i32(tmp, tmp);
  5074. tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
  5075. store_reg(s, a->rd, tmp);
  5076. return true;
  5077. }
  5078. /*
  5079. * Multiply and multiply accumulate
  5080. */
  5081. static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
  5082. {
  5083. TCGv_i32 t1, t2;
  5084. t1 = load_reg(s, a->rn);
  5085. t2 = load_reg(s, a->rm);
  5086. tcg_gen_mul_i32(t1, t1, t2);
  5087. tcg_temp_free_i32(t2);
  5088. if (add) {
  5089. t2 = load_reg(s, a->ra);
  5090. tcg_gen_add_i32(t1, t1, t2);
  5091. tcg_temp_free_i32(t2);
  5092. }
  5093. if (a->s) {
  5094. gen_logic_CC(t1);
  5095. }
  5096. store_reg(s, a->rd, t1);
  5097. return true;
  5098. }
/* MUL: Rd = Rn * Rm, no accumulate. */
static bool trans_MUL(DisasContext *s, arg_MUL *a)
{
    return op_mla(s, a, false);
}
/* MLA: Rd = Rn * Rm + Ra. */
static bool trans_MLA(DisasContext *s, arg_MLA *a)
{
    return op_mla(s, a, true);
}
  5107. static bool trans_MLS(DisasContext *s, arg_MLS *a)
  5108. {
  5109. TCGv_i32 t1, t2;
  5110. if (!ENABLE_ARCH_6T2) {
  5111. return false;
  5112. }
  5113. t1 = load_reg(s, a->rn);
  5114. t2 = load_reg(s, a->rm);
  5115. tcg_gen_mul_i32(t1, t1, t2);
  5116. tcg_temp_free_i32(t2);
  5117. t2 = load_reg(s, a->ra);
  5118. tcg_gen_sub_i32(t1, t2, t1);
  5119. tcg_temp_free_i32(t2);
  5120. store_reg(s, a->rd, t1);
  5121. return true;
  5122. }
/*
 * Common body for the long multiplies: compute the 64-bit product of
 * rn * rm (unsigned when @uns), optionally adding in the existing
 * rd:ra pair (@add).  Ra receives the low half, Rd the high half.
 */
static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
{
    TCGv_i32 t0, t1, t2, t3;

    t0 = load_reg(s, a->rm);
    t1 = load_reg(s, a->rn);
    /* t0 = low half of product, t1 = high half. */
    if (uns) {
        tcg_gen_mulu2_i32(t0, t1, t0, t1);
    } else {
        tcg_gen_muls2_i32(t0, t1, t0, t1);
    }
    if (add) {
        /* 64-bit accumulate: rd:ra += product, with carry between halves. */
        t2 = load_reg(s, a->ra);
        t3 = load_reg(s, a->rd);
        tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    if (a->s) {
        gen_logicq_cc(t0, t1);
    }
    store_reg(s, a->ra, t0);
    store_reg(s, a->rd, t1);
    return true;
}
/* UMULL: unsigned long multiply, no accumulate. */
static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
{
    return op_mlal(s, a, true, false);
}
/* SMULL: signed long multiply, no accumulate. */
static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
{
    return op_mlal(s, a, false, false);
}
/* UMLAL: unsigned long multiply-accumulate. */
static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
{
    return op_mlal(s, a, true, true);
}
/* SMLAL: signed long multiply-accumulate. */
static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
{
    return op_mlal(s, a, false, true);
}
/*
 * UMAAL: rd:ra = rn * rm + ra + rd.  Both 32-bit accumulators are added
 * into the 64-bit product separately, each with carry propagation.
 */
static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
{
    TCGv_i32 t0, t1, t2, zero;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rm);
    t1 = load_reg(s, a->rn);
    /* t0 = low half of product, t1 = high half. */
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    zero = tcg_const_i32(0);
    /* Add ra into the low half, carrying into the high half. */
    t2 = load_reg(s, a->ra);
    tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
    tcg_temp_free_i32(t2);
    /* Likewise for rd. */
    t2 = load_reg(s, a->rd);
    tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(zero);
    store_reg(s, a->ra, t0);
    store_reg(s, a->rd, t1);
    return true;
}
  5186. /*
  5187. * Saturating addition and subtraction
  5188. */
  5189. static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
  5190. {
  5191. TCGv_i32 t0, t1;
  5192. if (s->thumb
  5193. ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
  5194. : !ENABLE_ARCH_5TE) {
  5195. return false;
  5196. }
  5197. t0 = load_reg(s, a->rm);
  5198. t1 = load_reg(s, a->rn);
  5199. if (doub) {
  5200. gen_helper_add_saturate(t1, cpu_env, t1, t1);
  5201. }
  5202. if (add) {
  5203. gen_helper_add_saturate(t0, cpu_env, t0, t1);
  5204. } else {
  5205. gen_helper_sub_saturate(t0, cpu_env, t0, t1);
  5206. }
  5207. tcg_temp_free_i32(t1);
  5208. store_reg(s, a->rd, t0);
  5209. return true;
  5210. }
/* Expand the four saturating add/sub insns from op_qaddsub. */
#define DO_QADDSUB(NAME, ADD, DOUB) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)    \
{                                                        \
    return op_qaddsub(s, a, ADD, DOUB);                  \
}

DO_QADDSUB(QADD, true, false)
DO_QADDSUB(QSUB, false, false)
DO_QADDSUB(QDADD, true, true)
DO_QADDSUB(QDSUB, false, true)

#undef DO_QADDSUB
/*
 * Halfword multiply and multiply accumulate
 */

/*
 * Common body for SMUL<x><y>/SMLA<x><y>/SMLAL<x><y>: 16x16->32 multiply
 * of the halves of rn and rm selected by @nt/@mt, then either no
 * accumulate (add_long == 0), a saturating 32-bit accumulate (1), or a
 * 64-bit accumulate into rd:ra (2).
 */
static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
                       int add_long, bool nt, bool mt)
{
    TCGv_i32 t0, t1, tl, th;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_5TE) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);
    gen_mulxy(t0, t1, nt, mt);
    tcg_temp_free_i32(t1);

    switch (add_long) {
    case 0:
        /* SMULxy: plain product. */
        store_reg(s, a->rd, t0);
        break;
    case 1:
        /* SMLAxy: Q-flag-setting accumulate with Ra. */
        t1 = load_reg(s, a->ra);
        gen_helper_add_setq(t0, cpu_env, t0, t1);
        tcg_temp_free_i32(t1);
        store_reg(s, a->rd, t0);
        break;
    case 2:
        /* SMLALxy: 64-bit accumulate into rd:ra. */
        tl = load_reg(s, a->ra);
        th = load_reg(s, a->rd);
        /* Sign-extend the 32-bit product to 64 bits. */
        t1 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t1, t0, 31);
        tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        store_reg(s, a->ra, tl);
        store_reg(s, a->rd, th);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
/* Expand the halfword-multiply family from op_smlaxxx:
 * add = 0 plain, 1 accumulate with Q, 2 long accumulate;
 * nt/mt select top (1) or bottom (0) halves of Rn/Rm.
 */
#define DO_SMLAX(NAME, add, nt, mt) \
static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
{                                                          \
    return op_smlaxxx(s, a, add, nt, mt);                  \
}

DO_SMLAX(SMULBB, 0, 0, 0)
DO_SMLAX(SMULBT, 0, 0, 1)
DO_SMLAX(SMULTB, 0, 1, 0)
DO_SMLAX(SMULTT, 0, 1, 1)

DO_SMLAX(SMLABB, 1, 0, 0)
DO_SMLAX(SMLABT, 1, 0, 1)
DO_SMLAX(SMLATB, 1, 1, 0)
DO_SMLAX(SMLATT, 1, 1, 1)

DO_SMLAX(SMLALBB, 2, 0, 0)
DO_SMLAX(SMLALBT, 2, 0, 1)
DO_SMLAX(SMLALTB, 2, 1, 0)
DO_SMLAX(SMLALTT, 2, 1, 1)

#undef DO_SMLAX
/*
 * Common body for SMULW<y>/SMLAW<y>: 32x16 multiply taking the top 32
 * bits of the 48-bit product, optionally accumulating Ra with Q-flag
 * saturation.  @mt selects the top (1) or bottom (0) half of Rm.
 */
static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
{
    TCGv_i32 t0, t1;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);
    /*
     * Since the nominal result is product<47:16>, shift the 16-bit
     * input up by 16 bits, so that the result is at product<63:32>.
     */
    if (mt) {
        tcg_gen_andi_i32(t1, t1, 0xffff0000);
    } else {
        tcg_gen_shli_i32(t1, t1, 16);
    }
    /* t1 receives the high half, which is the wanted result. */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_temp_free_i32(t0);
    if (add) {
        t0 = load_reg(s, a->ra);
        gen_helper_add_setq(t1, cpu_env, t1, t0);
        tcg_temp_free_i32(t0);
    }
    store_reg(s, a->rd, t1);
    return true;
}
/* Expand the word-by-halfword multiply insns from op_smlawx. */
#define DO_SMLAWX(NAME, add, mt) \
static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
{                                                          \
    return op_smlawx(s, a, add, mt);                       \
}

DO_SMLAWX(SMULWB, 0, 0)
DO_SMLAWX(SMULWT, 0, 1)
DO_SMLAWX(SMLAWB, 1, 0)
DO_SMLAWX(SMLAWT, 1, 1)

#undef DO_SMLAWX
/*
 * MSR (immediate) and hints
 */

static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
{
    /*
     * When running single-threaded TCG code, use the helper to ensure that
     * the next round-robin scheduled vCPU gets a crack. When running in
     * MTTCG we don't generate jumps to the helper as it won't affect the
     * scheduling of other vCPUs.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_YIELD;
    }
    return true;
}
static bool trans_WFE(DisasContext *s, arg_WFE *a)
{
    /*
     * When running single-threaded TCG code, use the helper to ensure that
     * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
     * just skip this instruction. Currently the SEV/SEVL instructions,
     * which are *one* of many ways to wake the CPU from WFE, are not
     * implemented so we can't sleep like WFI does.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}
static bool trans_WFI(DisasContext *s, arg_WFI *a)
{
    /* For WFI, halt the vCPU until an IRQ. */
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_WFI;
    return true;
}
/* NOP (and hints we treat as NOP): nothing to emit. */
static bool trans_NOP(DisasContext *s, arg_NOP *a)
{
    return true;
}
  5362. static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
  5363. {
  5364. uint32_t val = ror32(a->imm, a->rot * 2);
  5365. uint32_t mask = msr_mask(s, a->mask, a->r);
  5366. if (gen_set_psr_im(s, mask, a->r, val)) {
  5367. unallocated_encoding(s);
  5368. }
  5369. return true;
  5370. }
/*
 * Cyclic Redundancy Check
 */

/*
 * Common body for the CRC32/CRC32C insns: fold Rm (narrowed to @sz)
 * into the running CRC in Rn, writing the result to Rd.  @c selects
 * the Castagnoli polynomial variant.
 */
static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
{
    TCGv_i32 t1, t2, t3;

    if (!dc_isar_feature(aa32_crc32, s)) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    /* Narrow the data operand to the access size. */
    switch (sz) {
    case MO_8:
        gen_uxtb(t2);
        break;
    case MO_16:
        gen_uxth(t2);
        break;
    case MO_32:
        break;
    default:
        g_assert_not_reached();
    }
    /* The helpers take the byte count as a third operand. */
    t3 = tcg_const_i32(1 << sz);
    if (c) {
        gen_helper_crc32c(t1, t1, t2, t3);
    } else {
        gen_helper_crc32(t1, t1, t2, t3);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
    store_reg(s, a->rd, t1);
    return true;
}
/* Expand the six CRC32 variants as thin wrappers around op_crc32(). */
#define DO_CRC32(NAME, c, sz) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
{ return op_crc32(s, a, c, sz); }

DO_CRC32(CRC32B, false, MO_8)
DO_CRC32(CRC32H, false, MO_16)
DO_CRC32(CRC32W, false, MO_32)
DO_CRC32(CRC32CB, true, MO_8)
DO_CRC32(CRC32CH, true, MO_16)
DO_CRC32(CRC32CW, true, MO_32)

#undef DO_CRC32
  5415. /*
  5416. * Miscellaneous instructions
  5417. */
/* MRS (banked register): read a banked register; not valid on M-profile. */
static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
{
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_mrs_banked(s, a->r, a->sysm, a->rd);
    return true;
}

/* MSR (banked register): write a banked register; not valid on M-profile. */
static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
{
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_msr_banked(s, a->r, a->sysm, a->rn);
    return true;
}
/* MRS: rd = CPSR (r == 0) or SPSR (r == 1); SPSR read UNDEFs in user mode. */
static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
{
    TCGv_i32 tmp;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (a->r) {
        /* SPSR read */
        if (IS_USER(s)) {
            unallocated_encoding(s);
            return true;
        }
        tmp = load_cpu_field(spsr);
    } else {
        /* CPSR read, assembled by helper from its component fields */
        tmp = tcg_temp_new_i32();
        gen_helper_cpsr_read(tmp, cpu_env);
    }
    store_reg(s, a->rd, tmp);
    return true;
}
/* MSR (register): write rn to CPSR/SPSR fields selected by the mask. */
static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
{
    TCGv_i32 tmp;
    uint32_t mask = msr_mask(s, a->mask, a->r);

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    tmp = load_reg(s, a->rn);
    /* gen_set_psr() returns nonzero for an illegal access: UNDEF. */
    if (gen_set_psr(s, mask, a->r, tmp)) {
        unallocated_encoding(s);
    }
    return true;
}
/* M-profile MRS: read a system register, identified by sysm, via helper. */
static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
{
    TCGv_i32 tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /* tmp carries sysm in, and the register value out. */
    tmp = tcg_const_i32(a->sysm);
    gen_helper_v7m_mrs(tmp, cpu_env, tmp);
    store_reg(s, a->rd, tmp);
    return true;
}
/*
 * M-profile MSR: write rn to the system register identified by
 * (mask, sysm), then resync hflags and look up the next TB, because
 * the write may have changed the execution state (e.g. CONTROL).
 */
static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
{
    TCGv_i32 addr, reg;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /* The helper decodes mask and sysm from this packed value. */
    addr = tcg_const_i32((a->mask << 10) | a->sysm);
    reg = load_reg(s, a->rn);
    gen_helper_v7m_msr(cpu_env, addr, reg);
    tcg_temp_free_i32(addr);
    tcg_temp_free_i32(reg);
    /* If we wrote to CONTROL, the EL might have changed */
    gen_helper_rebuild_hflags_m32_newel(cpu_env);
    gen_lookup_tb(s);
    return true;
}
/* BX: branch to rm, with interworking and exception-return handling. */
static bool trans_BX(DisasContext *s, arg_BX *a)
{
    if (!ENABLE_ARCH_4T) {
        return false;
    }
    gen_bx_excret(s, load_reg(s, a->rm));
    return true;
}
/* BXJ: Jazelle branch; we don't implement Jazelle, so it behaves as BX. */
static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
{
    if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /* Trivial implementation equivalent to bx.  */
    gen_bx(s, load_reg(s, a->rm));
    return true;
}
/* BLX (register): link then branch-and-exchange to rm. */
static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_5) {
        return false;
    }
    /* Load rm before clobbering LR, in case rm == r14. */
    tmp = load_reg(s, a->rm);
    /* LR = return address, with the Thumb bit reflecting current state. */
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    gen_bx(s, tmp);
    return true;
}
  5521. /*
  5522. * BXNS/BLXNS: only exist for v8M with the security extensions,
  5523. * and always UNDEF if NonSecure. We don't implement these in
  5524. * the user-only mode either (in theory you can use them from
  5525. * Secure User mode but they are too tied in to system emulation).
  5526. */
  5527. static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
  5528. {
  5529. if (!s->v8m_secure || IS_USER_ONLY) {
  5530. unallocated_encoding(s);
  5531. } else {
  5532. gen_bxns(s, a->rm);
  5533. }
  5534. return true;
  5535. }
  5536. static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
  5537. {
  5538. if (!s->v8m_secure || IS_USER_ONLY) {
  5539. unallocated_encoding(s);
  5540. } else {
  5541. gen_blxns(s, a->rm);
  5542. }
  5543. return true;
  5544. }
  5545. static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
  5546. {
  5547. TCGv_i32 tmp;
  5548. if (!ENABLE_ARCH_5) {
  5549. return false;
  5550. }
  5551. tmp = load_reg(s, a->rm);
  5552. tcg_gen_clzi_i32(tmp, tmp, 32);
  5553. store_reg(s, a->rd, tmp);
  5554. return true;
  5555. }
/* ERET: exception return; privileged only, UNDEFs in user mode. */
static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i32 tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
        return true;
    }
    if (s->current_el == 2) {
        /* ERET from Hyp uses ELR_Hyp, not LR */
        tmp = load_cpu_field(elr_el[2]);
    } else {
        tmp = load_reg(s, 14);
    }
    gen_exception_return(s, tmp);
    return true;
}
/* HLT: semihosting/debug halt; all handling lives in gen_hlt(). */
static bool trans_HLT(DisasContext *s, arg_HLT *a)
{
    gen_hlt(s, a->imm);
    return true;
}
/*
 * BKPT: software breakpoint. On M-profile, "bkpt 0xab" with
 * semihosting enabled (and privileged, for system emulation) is the
 * semihosting trap; otherwise generate a BKPT debug exception.
 */
static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
{
    if (!ENABLE_ARCH_5) {
        return false;
    }
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        !IS_USER(s) &&
#endif
        (a->imm == 0xab)) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
    } else {
        gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
    }
    return true;
}
  5597. static bool trans_HVC(DisasContext *s, arg_HVC *a)
  5598. {
  5599. if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
  5600. return false;
  5601. }
  5602. if (IS_USER(s)) {
  5603. unallocated_encoding(s);
  5604. } else {
  5605. gen_hvc(s, a->imm);
  5606. }
  5607. return true;
  5608. }
  5609. static bool trans_SMC(DisasContext *s, arg_SMC *a)
  5610. {
  5611. if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
  5612. return false;
  5613. }
  5614. if (IS_USER(s)) {
  5615. unallocated_encoding(s);
  5616. } else {
  5617. gen_smc(s);
  5618. }
  5619. return true;
  5620. }
/* SG (v8M only): secure gateway instruction. */
static bool trans_SG(DisasContext *s, arg_SG *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    /*
     * SG (v8M only)
     * The bulk of the behaviour for this instruction is implemented
     * in v7m_handle_execute_nsc(), which deals with the insn when
     * it is executed by a CPU in non-secure state from memory
     * which is Secure & NonSecure-Callable.
     * Here we only need to handle the remaining cases:
     *  * in NS memory (including the "security extension not
     *    implemented" case) : NOP
     *  * in S memory but CPU already secure (clear IT bits)
     * We know that the attribute for the memory this insn is
     * in must match the current CPU state, because otherwise
     * get_phys_addr_pmsav8 would have generated an exception.
     */
    if (s->v8m_secure) {
        /* Like the IT insn, we don't need to generate any code */
        s->condexec_cond = 0;
        s->condexec_mask = 0;
    }
    return true;
}
/* TT/TTT/TTA/TTAT (v8M): test target address security attributes. */
static bool trans_TT(DisasContext *s, arg_TT *a)
{
    TCGv_i32 addr, tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
        /* We UNDEF for these UNPREDICTABLE cases */
        unallocated_encoding(s);
        return true;
    }
    if (a->A && !s->v8m_secure) {
        /* This case is UNDEFINED.  */
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    /* tmp carries the (A, T) flag pair in, and the TT result out. */
    tmp = tcg_const_i32((a->A << 1) | a->T);
    gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
    tcg_temp_free_i32(addr);
    store_reg(s, a->rd, tmp);
    return true;
}
  5672. /*
  5673. * Load/store register index
  5674. */
  5675. static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
  5676. {
  5677. ISSInfo ret;
  5678. /* ISS not valid if writeback */
  5679. if (p && !w) {
  5680. ret = rd;
  5681. if (s->base.pc_next - s->pc_curr == 2) {
  5682. ret |= ISSIs16Bit;
  5683. }
  5684. } else {
  5685. ret = ISSInvalid;
  5686. }
  5687. return ret;
  5688. }
/*
 * Compute the effective address for a register-offset load/store.
 * Returns a new temp holding rn, with the (shifted) rm offset
 * applied when pre-indexed (a->p). Caller owns the returned temp.
 */
static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
{
    TCGv_i32 addr = load_reg(s, a->rn);

    /* Writeback through SP on v8M triggers the stack-limit check. */
    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (a->p) {
        TCGv_i32 ofs = load_reg(s, a->rm);
        gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
        if (a->u) {
            tcg_gen_add_i32(addr, addr, ofs);
        } else {
            tcg_gen_sub_i32(addr, addr, ofs);
        }
        tcg_temp_free_i32(ofs);
    }
    return addr;
}
/*
 * Finish a register-offset load/store: apply the post-index offset
 * and/or perform base-register writeback. Consumes @addr either way.
 * @address_offset is an extra fixup (e.g. -4 after LDRD's increment).
 */
static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
                            TCGv_i32 addr, int address_offset)
{
    if (!a->p) {
        /* Post-indexed: apply the shifted register offset now. */
        TCGv_i32 ofs = load_reg(s, a->rm);
        gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
        if (a->u) {
            tcg_gen_add_i32(addr, addr, ofs);
        } else {
            tcg_gen_sub_i32(addr, addr, ofs);
        }
        tcg_temp_free_i32(ofs);
    } else if (!a->w) {
        /* Pre-indexed without writeback: nothing to store back. */
        tcg_temp_free_i32(addr);
        return;
    }
    tcg_gen_addi_i32(addr, addr, address_offset);
    store_reg(s, a->rn, addr);
}
/* Common code for register-offset loads (LDR/LDRB/LDRH/LDRSB/LDRSH). */
static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
                       MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
    TCGv_i32 addr, tmp;

    addr = op_addr_rr_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);

    /*
     * Perform base writeback before the loaded value to
     * ensure correct behavior with overlapping index registers.
     */
    op_addr_rr_post(s, a, addr, 0);
    store_reg_from_load(s, a->rt, tmp);
    return true;
}
/* Common code for register-offset stores (STR/STRB/STRH). */
static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
                        MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
    TCGv_i32 addr, tmp;

    addr = op_addr_rr_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);
    tcg_temp_free_i32(tmp);

    op_addr_rr_post(s, a, addr, 0);
    return true;
}
/* LDRD (register): load rt/rt+1 from addr/addr+4; rt must be even. */
static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }
    if (a->rt & 1) {
        /* Odd rt is UNPREDICTABLE; we choose to UNDEF. */
        unallocated_encoding(s);
        return true;
    }
    addr = op_addr_rr_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, a->rt, tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, a->rt + 1, tmp);

    /* LDRD w/ base writeback is undefined if the registers overlap.  */
    /* -4 undoes the increment so writeback uses the original base. */
    op_addr_rr_post(s, a, addr, -4);
    return true;
}
/* STRD (register): store rt/rt+1 to addr/addr+4; rt must be even. */
static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }
    if (a->rt & 1) {
        /* Odd rt is UNPREDICTABLE; we choose to UNDEF. */
        unallocated_encoding(s);
        return true;
    }
    addr = op_addr_rr_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = load_reg(s, a->rt + 1);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    /* -4 undoes the increment so writeback uses the original base. */
    op_addr_rr_post(s, a, addr, -4);
    return true;
}
  5801. /*
  5802. * Load/store immediate index
  5803. */
/*
 * Compute the effective address for an immediate-offset load/store.
 * Returns a new temp (via add_reg_for_lit, which also handles PC-relative
 * literals); the immediate is applied only when pre-indexed (a->p).
 */
static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
{
    int ofs = a->imm;

    if (!a->u) {
        ofs = -ofs;
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Stackcheck. Here we know 'addr' is the current SP;
         * U is set if we're moving SP up, else down. It is
         * UNKNOWN whether the limit check triggers when SP starts
         * below the limit and ends up above it; we chose to do so.
         */
        if (!a->u) {
            TCGv_i32 newsp = tcg_temp_new_i32();
            tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
            gen_helper_v8m_stackcheck(cpu_env, newsp);
            tcg_temp_free_i32(newsp);
        } else {
            gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
        }
    }

    return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
}
  5828. static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
  5829. TCGv_i32 addr, int address_offset)
  5830. {
  5831. if (!a->p) {
  5832. if (a->u) {
  5833. address_offset += a->imm;
  5834. } else {
  5835. address_offset -= a->imm;
  5836. }
  5837. } else if (!a->w) {
  5838. tcg_temp_free_i32(addr);
  5839. return;
  5840. }
  5841. tcg_gen_addi_i32(addr, addr, address_offset);
  5842. store_reg(s, a->rn, addr);
  5843. }
/* Common code for immediate-offset loads (LDR/LDRB/LDRH/LDRSB/LDRSH). */
static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
                       MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);

    /*
     * Perform base writeback before the loaded value to
     * ensure correct behavior with overlapping index registers.
     */
    op_addr_ri_post(s, a, addr, 0);
    store_reg_from_load(s, a->rt, tmp);
    return true;
}
/* Common code for immediate-offset stores (STR/STRB/STRH). */
static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
                        MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);
    tcg_temp_free_i32(tmp);

    op_addr_ri_post(s, a, addr, 0);
    return true;
}
/* Common code for LDRD (immediate): load rt and @rt2 from addr/addr+4. */
static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, a->rt, tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, rt2, tmp);

    /* LDRD w/ base writeback is undefined if the registers overlap.  */
    /* -4 undoes the increment so writeback uses the original base. */
    op_addr_ri_post(s, a, addr, -4);
    return true;
}
/* A32 LDRD: rt must be even; second register is implicitly rt + 1. */
static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
{
    if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
        return false;
    }
    return op_ldrd_ri(s, a, a->rt + 1);
}

/* T32 LDRD: rt2 is encoded explicitly; repack into arg_ldst_ri. */
static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
{
    arg_ldst_ri b = {
        .u = a->u, .w = a->w, .p = a->p,
        .rn = a->rn, .rt = a->rt, .imm = a->imm
    };
    return op_ldrd_ri(s, &b, a->rt2);
}
/* Common code for STRD (immediate): store rt and @rt2 to addr/addr+4. */
static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = load_reg(s, rt2);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    /* -4 undoes the increment so writeback uses the original base. */
    op_addr_ri_post(s, a, addr, -4);
    return true;
}
/* A32 STRD: rt must be even; second register is implicitly rt + 1. */
static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
{
    if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
        return false;
    }
    return op_strd_ri(s, a, a->rt + 1);
}

/* T32 STRD: rt2 is encoded explicitly; repack into arg_ldst_ri. */
static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
{
    arg_ldst_ri b = {
        .u = a->u, .w = a->w, .p = a->p,
        .rn = a->rn, .rt = a->rt, .imm = a->imm
    };
    return op_strd_ri(s, &b, a->rt2);
}
/*
 * Expand the four addressing variants for each basic load/store:
 * immediate, immediate-unprivileged (LDRT etc), register, and
 * register-unprivileged.
 */
#define DO_LDST(NAME, WHICH, MEMOP) \
static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a)        \
{                                                                     \
    return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s));            \
}                                                                     \
static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a)       \
{                                                                     \
    return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s));   \
}                                                                     \
static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a)        \
{                                                                     \
    return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s));            \
}                                                                     \
static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a)       \
{                                                                     \
    return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s));   \
}

DO_LDST(LDR, load, MO_UL)
DO_LDST(LDRB, load, MO_UB)
DO_LDST(LDRH, load, MO_UW)
DO_LDST(LDRSB, load, MO_SB)
DO_LDST(LDRSH, load, MO_SW)

DO_LDST(STR, store, MO_UL)
DO_LDST(STRB, store, MO_UB)
DO_LDST(STRH, store, MO_UW)

#undef DO_LDST
  5961. /*
  5962. * Synchronization primitives
  5963. */
/* SWP/SWPB: atomically exchange rt2 with memory at [rn], result in rt. */
static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
{
    TCGv_i32 addr, tmp;
    TCGv taddr;

    opc |= s->be_data;
    addr = load_reg(s, a->rn);
    taddr = gen_aa32_addr(s, addr, opc);
    tcg_temp_free_i32(addr);

    tmp = load_reg(s, a->rt2);
    /* Single atomic xchg: tmp = mem[taddr], mem[taddr] = tmp. */
    tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
    tcg_temp_free(taddr);

    store_reg(s, a->rt, tmp);
    return true;
}

static bool trans_SWP(DisasContext *s, arg_SWP *a)
{
    return op_swp(s, a, MO_UL | MO_ALIGN);
}

static bool trans_SWPB(DisasContext *s, arg_SWP *a)
{
    return op_swp(s, a, MO_UB);
}
  5986. /*
  5987. * Load/Store Exclusive and Load-Acquire/Store-Release
  5988. */
/*
 * Common code for STREX*/STLEX*: store-exclusive of rt (and rt2 for
 * MO_64) to [rn + imm], status in rd. @rel adds release semantics.
 */
static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
{
    TCGv_i32 addr;
    /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
    bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);

    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rd == 15 || a->rn == 15 || a->rt == 15
        || a->rd == a->rn || a->rd == a->rt
        || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
        || (mop == MO_64
            && (a->rt2 == 15
                || a->rd == a->rt2
                || (!v8a && s->thumb && a->rt2 == 13)))) {
        unallocated_encoding(s);
        return true;
    }

    if (rel) {
        /* Release: order all prior accesses before the store. */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    /* Local temp: survives the branches inside gen_store_exclusive. */
    addr = tcg_temp_local_new_i32();
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

    gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
    tcg_temp_free_i32(addr);
    return true;
}
/* STREX family: per-size/encoding wrappers around op_strex (no release). */
static bool trans_STREX(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_strex(s, a, MO_32, false);
}

static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_6K) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    /* A32 encodes only rt; the pair is rt, rt + 1. */
    a->rt2 = a->rt + 1;
    return op_strex(s, a, MO_64, false);
}

static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
{
    return op_strex(s, a, MO_64, false);
}

static bool trans_STREXB(DisasContext *s, arg_STREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_strex(s, a, MO_8, false);
}

static bool trans_STREXH(DisasContext *s, arg_STREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_strex(s, a, MO_16, false);
}
/* STLEX family (v8): store-exclusive with release semantics. */
static bool trans_STLEX(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_32, true);
}

static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    /* A32 encodes only rt; the pair is rt, rt + 1. */
    a->rt2 = a->rt + 1;
    return op_strex(s, a, MO_64, true);
}

static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_64, true);
}

static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_8, true);
}

static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_16, true);
}
/* STL/STLB/STLH (v8): plain store-release (non-exclusive) of rt to [rn]. */
static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
{
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rn == 15 || a->rt == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = load_reg(s, a->rt);
    /* Release barrier precedes the store. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
    disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);
    return true;
}

static bool trans_STL(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UL);
}

static bool trans_STLB(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UB);
}

static bool trans_STLH(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UW);
}
/*
 * Common code for LDREX*/LDAEX*: load-exclusive of [rn + imm] into rt
 * (and rt2 for MO_64). @acq adds acquire semantics.
 */
static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
{
    TCGv_i32 addr;
    /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
    bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);

    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rn == 15 || a->rt == 15
        || (!v8a && s->thumb && a->rt == 13)
        || (mop == MO_64
            && (a->rt2 == 15 || a->rt == a->rt2
                || (!v8a && s->thumb && a->rt2 == 13)))) {
        unallocated_encoding(s);
        return true;
    }

    /* Local temp: survives the branches inside gen_load_exclusive. */
    addr = tcg_temp_local_new_i32();
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

    gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
    tcg_temp_free_i32(addr);

    if (acq) {
        /* Acquire: order the load before all subsequent accesses. */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}
/* LDREX family: per-size/encoding wrappers around op_ldrex (no acquire). */
static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_ldrex(s, a, MO_32, false);
}

static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_6K) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    /* A32 encodes only rt; the pair is rt, rt + 1. */
    a->rt2 = a->rt + 1;
    return op_ldrex(s, a, MO_64, false);
}

static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
{
    return op_ldrex(s, a, MO_64, false);
}

static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_ldrex(s, a, MO_8, false);
}

static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_ldrex(s, a, MO_16, false);
}
/* LDAEX family (v8): load-exclusive with acquire semantics. */
static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_32, true);
}

static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    /* A32 encodes only rt; the pair is rt, rt + 1. */
    a->rt2 = a->rt + 1;
    return op_ldrex(s, a, MO_64, true);
}

static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_64, true);
}

static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_8, true);
}

static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_16, true);
}
  6229. static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
  6230. {
  6231. TCGv_i32 addr, tmp;
  6232. if (!ENABLE_ARCH_8) {
  6233. return false;
  6234. }
  6235. /* We UNDEF for these UNPREDICTABLE cases. */
  6236. if (a->rn == 15 || a->rt == 15) {
  6237. unallocated_encoding(s);
  6238. return true;
  6239. }
  6240. addr = load_reg(s, a->rn);
  6241. tmp = tcg_temp_new_i32();
  6242. gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
  6243. disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
  6244. tcg_temp_free_i32(addr);
  6245. store_reg(s, a->rt, tmp);
  6246. tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
  6247. return true;
  6248. }
/* Size wrappers for the load-acquire family. */
static bool trans_LDA(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UL);
}

static bool trans_LDAB(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UB);
}

static bool trans_LDAH(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UW);
}
  6261. /*
  6262. * Media instructions
  6263. */
/*
 * USAD8/USADA8: sum of absolute byte differences of rn and rm,
 * accumulated into ra unless ra == 15 (which encodes USAD8).
 */
static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    gen_helper_usad8(t1, t1, t2);
    tcg_temp_free_i32(t2);
    if (a->ra != 15) {
        /* USADA8: add the accumulator. */
        t2 = load_reg(s, a->ra);
        tcg_gen_add_i32(t1, t1, t2);
        tcg_temp_free_i32(t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}
  6282. static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
  6283. {
  6284. TCGv_i32 tmp;
  6285. int width = a->widthm1 + 1;
  6286. int shift = a->lsb;
  6287. if (!ENABLE_ARCH_6T2) {
  6288. return false;
  6289. }
  6290. if (shift + width > 32) {
  6291. /* UNPREDICTABLE; we choose to UNDEF */
  6292. unallocated_encoding(s);
  6293. return true;
  6294. }
  6295. tmp = load_reg(s, a->rn);
  6296. if (u) {
  6297. tcg_gen_extract_i32(tmp, tmp, shift, width);
  6298. } else {
  6299. tcg_gen_sextract_i32(tmp, tmp, shift, width);
  6300. }
  6301. store_reg(s, a->rd, tmp);
  6302. return true;
  6303. }
/* SBFX: signed bitfield extract. */
static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
{
    return op_bfx(s, a, false);
}

/* UBFX: unsigned bitfield extract. */
static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
{
    return op_bfx(s, a, true);
}
/*
 * BFC/BFI: clear (rn == 15) or insert (from rn) the bitfield
 * [lsb..msb] of rd, leaving the other bits of rd untouched.
 */
static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
{
    TCGv_i32 tmp;
    int msb = a->msb, lsb = a->lsb;
    int width;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (msb < lsb) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    width = msb + 1 - lsb;
    if (a->rn == 15) {
        /* BFC */
        tmp = tcg_const_i32(0);
    } else {
        /* BFI */
        tmp = load_reg(s, a->rn);
    }
    if (width != 32) {
        /* Merge the new field into the existing rd contents. */
        TCGv_i32 tmp2 = load_reg(s, a->rd);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}
/* UDF: permanently undefined instruction; always UNDEFs. */
static bool trans_UDF(DisasContext *s, arg_UDF *a)
{
    unallocated_encoding(s);
    return true;
}
  6346. /*
  6347. * Parallel addition and subtraction
  6348. */
/* Common code for parallel add/sub ops: rd = gen(rn, rm). */
static bool op_par_addsub(DisasContext *s, arg_rrr *a,
                          void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0, t1;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    gen(t0, t0, t1);

    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}
/*
 * As op_par_addsub, but for ops whose helper also updates the GE
 * flags: a pointer to CPUARMState.GE is passed as the extra argument.
 */
static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
                             void (*gen)(TCGv_i32, TCGv_i32,
                                         TCGv_i32, TCGv_ptr))
{
    TCGv_i32 t0, t1;
    TCGv_ptr ge;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    ge = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
    gen(t0, t0, t1, ge);

    tcg_temp_free_ptr(ge);
    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}
/*
 * Expand the parallel add/sub instructions. The signed/unsigned
 * (non-halving, non-saturating) forms set the GE flags and use the
 * _GE expander; the Q (saturating) and H (halving) forms do not.
 */
#define DO_PAR_ADDSUB(NAME, helper) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
{                                                       \
    return op_par_addsub(s, a, helper);                 \
}

#define DO_PAR_ADDSUB_GE(NAME, helper) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
{                                                       \
    return op_par_addsub_ge(s, a, helper);              \
}

DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)

DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)

DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)

DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)

DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)

DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)

#undef DO_PAR_ADDSUB
#undef DO_PAR_ADDSUB_GE
  6434. /*
  6435. * Packing, unpacking, saturation, and reversal
  6436. */
/*
 * PKHBT/PKHTB: pack halfwords from two registers into rd,
 * with an optional shift applied to rm first.
 */
static bool trans_PKH(DisasContext *s, arg_PKH *a)
{
    TCGv_i32 tn, tm;
    int shift = a->imm;

    /* Requires the DSP extension in Thumb mode, or ARMv6 for A32. */
    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    tn = load_reg(s, a->rn);
    tm = load_reg(s, a->rm);
    if (a->tb) {
        /* PKHTB: rd = rn[31:16] : (rm >> shift)[15:0].  */
        if (shift == 0) {
            /*
             * An encoded shift of 0 means ASR #32; shifting by 31
             * produces the same value (all copies of the sign bit)
             * in the low 16 bits we deposit below.
             */
            shift = 31;
        }
        tcg_gen_sari_i32(tm, tm, shift);
        tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
    } else {
        /* PKHBT: rd = (rm << shift)[31:16] : rn[15:0].  */
        tcg_gen_shli_i32(tm, tm, shift);
        tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
    }
    tcg_temp_free_i32(tm);
    store_reg(s, a->rd, tn);
    return true;
}
/*
 * Common translation for SSAT/USAT (and the 16-bit forms via the
 * trans_* wrappers): optionally shift rn, then call the saturation
 * helper, which also sets the Q flag on saturation.
 */
static bool op_sat(DisasContext *s, arg_sat *a,
                   void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp, satimm;
    int shift = a->imm;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rn);
    if (a->sh) {
        /* ASR; an encoded shift of 0 means ASR #32, and #31 is equivalent. */
        tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
    } else {
        /* LSL */
        tcg_gen_shli_i32(tmp, tmp, shift);
    }

    satimm = tcg_const_i32(a->satimm);
    gen(tmp, cpu_env, tmp, satimm);
    tcg_temp_free_i32(satimm);

    store_reg(s, a->rd, tmp);
    return true;
}
/* SSAT: signed saturate with optional shift. */
static bool trans_SSAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_ssat);
}

/* USAT: unsigned saturate with optional shift. */
static bool trans_USAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_usat);
}
/* SSAT16: signed saturate each halfword; Thumb requires the DSP extension. */
static bool trans_SSAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_ssat16);
}

/* USAT16: unsigned saturate each halfword; Thumb requires the DSP extension. */
static bool trans_USAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_usat16);
}
/*
 * Common translation for the extend(-and-add) family (SXTB/UXTAH/...):
 * rotate rm by rot*8, extract with gen_extract, and if rn != 15
 * accumulate into it with gen_add.  rn == 15 encodes the plain
 * extend (no accumulate) forms.
 */
static bool op_xta(DisasContext *s, arg_rrr_rot *a,
                   void (*gen_extract)(TCGv_i32, TCGv_i32),
                   void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rm);
    /*
     * TODO: In many cases we could do a shift instead of a rotate.
     * Combined with a simple extend, that becomes an extract.
     */
    tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
    gen_extract(tmp, tmp);

    if (a->rn != 15) {
        TCGv_i32 tmp2 = load_reg(s, a->rn);
        gen_add(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}
/* SXTAB / SXTB (rn == 15): sign-extend byte, optionally accumulate. */
static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
}

/* SXTAH / SXTH: sign-extend halfword, optionally accumulate. */
static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
}

/* SXTAB16 / SXTB16: sign-extend two bytes, dual 16-bit accumulate. */
static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_sxtb16, gen_add16);
}

/* UXTAB / UXTB: zero-extend byte, optionally accumulate. */
static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
}

/* UXTAH / UXTH: zero-extend halfword, optionally accumulate. */
static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
}

/* UXTAB16 / UXTB16: zero-extend two bytes, dual 16-bit accumulate. */
static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_uxtb16, gen_add16);
}
  6559. static bool trans_SEL(DisasContext *s, arg_rrr *a)
  6560. {
  6561. TCGv_i32 t1, t2, t3;
  6562. if (s->thumb
  6563. ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
  6564. : !ENABLE_ARCH_6) {
  6565. return false;
  6566. }
  6567. t1 = load_reg(s, a->rn);
  6568. t2 = load_reg(s, a->rm);
  6569. t3 = tcg_temp_new_i32();
  6570. tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
  6571. gen_helper_sel_flags(t1, t3, t1, t2);
  6572. tcg_temp_free_i32(t3);
  6573. tcg_temp_free_i32(t2);
  6574. store_reg(s, a->rd, t1);
  6575. return true;
  6576. }
  6577. static bool op_rr(DisasContext *s, arg_rr *a,
  6578. void (*gen)(TCGv_i32, TCGv_i32))
  6579. {
  6580. TCGv_i32 tmp;
  6581. tmp = load_reg(s, a->rm);
  6582. gen(tmp, tmp);
  6583. store_reg(s, a->rd, tmp);
  6584. return true;
  6585. }
/* REV: byte-reverse a word. */
static bool trans_REV(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, tcg_gen_bswap32_i32);
}

/* REV16: byte-reverse each halfword. */
static bool trans_REV16(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_rev16);
}

/* REVSH: byte-reverse the low halfword and sign-extend. */
static bool trans_REVSH(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_revsh);
}

/* RBIT: reverse the bits of a word; ARMv6T2 and later. */
static bool trans_RBIT(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    return op_rr(s, a, gen_helper_rbit);
}
  6614. /*
  6615. * Signed multiply, signed and unsigned divide
  6616. */
/*
 * Common translation for SMLAD/SMLADX/SMLSD/SMLSDX (and SMUAD/SMUSD
 * when ra == 15): dual signed 16x16 multiplies, then add or subtract
 * the products, then optionally accumulate into ra.
 * @m_swap: swap the halfwords of rm first (the "X" variants).
 * @sub:    subtract the products instead of adding them.
 */
static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (m_swap) {
        gen_swap_half(t2, t2);
    }
    /* t1 = rn.lo * rm.lo, t2 = rn.hi * rm.hi (after any swap). */
    gen_smul_dual(t1, t2);

    if (sub) {
        /* This subtraction cannot overflow. */
        tcg_gen_sub_i32(t1, t1, t2);
    } else {
        /*
         * This addition cannot overflow 32 bits; however it may
         * overflow considered as a signed operation, in which case
         * we must set the Q flag.
         */
        gen_helper_add_setq(t1, cpu_env, t1, t2);
    }
    tcg_temp_free_i32(t2);

    if (a->ra != 15) {
        /* Accumulate; this addition may also set the Q flag. */
        t2 = load_reg(s, a->ra);
        gen_helper_add_setq(t1, cpu_env, t1, t2);
        tcg_temp_free_i32(t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}
/* SMLAD: dual multiply, add products, accumulate. */
static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, false, false);
}

/* SMLADX: as SMLAD but with rm halfwords swapped. */
static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, true, false);
}

/* SMLSD: dual multiply, subtract products, accumulate. */
static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, false, true);
}

/* SMLSDX: as SMLSD but with rm halfwords swapped. */
static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, true, true);
}
/*
 * Common translation for SMLALD/SMLALDX/SMLSLD/SMLSLDX: dual signed
 * 16x16 multiplies, add or subtract the products, then accumulate the
 * result into the 64-bit value in ra:rd.
 * @m_swap: swap the halfwords of rm first (the "X" variants).
 * @sub:    subtract the products instead of adding them.
 */
static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
{
    TCGv_i32 t1, t2;
    TCGv_i64 l1, l2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (m_swap) {
        gen_swap_half(t2, t2);
    }
    gen_smul_dual(t1, t2);

    /* Widen the two products to 64 bits before combining. */
    l1 = tcg_temp_new_i64();
    l2 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(l1, t1);
    tcg_gen_ext_i32_i64(l2, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);

    if (sub) {
        tcg_gen_sub_i64(l1, l1, l2);
    } else {
        tcg_gen_add_i64(l1, l1, l2);
    }
    tcg_temp_free_i64(l2);

    /* 64-bit accumulate into ra:rd, then write the pair back. */
    gen_addq(s, l1, a->ra, a->rd);
    gen_storeq_reg(s, a->ra, a->rd, l1);
    tcg_temp_free_i64(l1);
    return true;
}
/* SMLALD: dual multiply, add products, 64-bit accumulate. */
static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, false, false);
}

/* SMLALDX: as SMLALD but with rm halfwords swapped. */
static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, true, false);
}

/* SMLSLD: dual multiply, subtract products, 64-bit accumulate. */
static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, false, true);
}

/* SMLSLDX: as SMLSLD but with rm halfwords swapped. */
static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, true, true);
}
/*
 * Common translation for SMMUL/SMMLA/SMMLS (and their rounding forms):
 * 32x32 -> 64 signed multiply keeping the most-significant word,
 * with optional accumulate/subtract of ra and optional rounding.
 * @round: add 0x80000000 to the 64-bit product before taking the top word.
 * @sub:   compute (ra << 32) - product instead of product + (ra << 32).
 */
static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
{
    TCGv_i32 t1, t2;

    /* Requires the DSP extension in Thumb mode, or ARMv6 for A32. */
    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    /* After this, t1 holds the high word of the product, t2 the low word. */
    tcg_gen_muls2_i32(t2, t1, t1, t2);

    if (a->ra != 15) {
        TCGv_i32 t3 = load_reg(s, a->ra);
        if (sub) {
            /*
             * For SMMLS we need a full 64-bit subtract,
             * (ra:0) - (t1:t2), so that the borrow generated by a
             * non-zero product lowpart propagates into the high word,
             * and so that t2 holds the correct result lowpart for the
             * rounding step below.
             */
            TCGv_i32 zero = tcg_const_i32(0);
            tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
            tcg_temp_free_i32(zero);
        } else {
            /* SMMLA: only the high words need be added. */
            tcg_gen_add_i32(t1, t1, t3);
        }
        tcg_temp_free_i32(t3);
    }

    if (round) {
        /*
         * Adding 0x80000000 to the 64-bit quantity means that we have
         * carry in to the high word when the low word has the msb set.
         */
        tcg_gen_shri_i32(t2, t2, 31);
        tcg_gen_add_i32(t1, t1, t2);
    }
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}
/* SMMLA (SMMUL when ra == 15): most-significant-word multiply accumulate. */
static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, false);
}

/* SMMLAR: as SMMLA, with rounding. */
static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, false);
}

/* SMMLS: most-significant-word multiply subtract. */
static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, true);
}

/* SMMLSR: as SMMLS, with rounding. */
static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, true);
}
/*
 * Common translation for SDIV/UDIV: rd = rn / rm via helper.
 * @u: true for the unsigned form.
 * Availability is gated on the per-ISA divide feature bits.
 */
static bool op_div(DisasContext *s, arg_rrr *a, bool u)
{
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !dc_isar_feature(aa32_thumb_div, s)
        : !dc_isar_feature(aa32_arm_div, s)) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (u) {
        gen_helper_udiv(t1, t1, t2);
    } else {
        gen_helper_sdiv(t1, t1, t2);
    }
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}
/* SDIV: signed divide. */
static bool trans_SDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, false);
}

/* UDIV: unsigned divide. */
static bool trans_UDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, true);
}
  6793. /*
  6794. * Block data transfer
  6795. */
/*
 * Compute the lowest transfer address for an LDM/STM of @n registers,
 * from base register rn and the addressing-mode bits (a->b = before,
 * a->i = increment).  The caller owns the returned temporary.
 */
static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
{
    TCGv_i32 addr = load_reg(s, a->rn);

    if (a->b) {
        if (a->i) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        }
    } else if (!a->i && n != 1) {
        /* post decrement */
        tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * If the writeback is incrementing SP rather than
         * decrementing it, and the initial SP is below the
         * stack limit but the final written-back SP would
         * be above, then we must not perform any memory
         * accesses, but it is IMPDEF whether we generate
         * an exception. We choose to do so in this case.
         * At this point 'addr' is the lowest address, so
         * either the original SP (if incrementing) or our
         * final SP (if decrementing), so that's what we check.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    return addr;
}
/*
 * Finish an LDM/STM: either write the adjusted address back to the
 * base register (when a->w is set) or free the address temporary.
 * @addr is the address after the final transfer, as left by the loop.
 */
static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
                               TCGv_i32 addr, int n)
{
    if (a->w) {
        /* write back */
        if (!a->b) {
            if (a->i) {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        } else if (!a->i && n != 1) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
/*
 * Common translation for STM and variants (STMDB/STMIA/... and
 * STM (user)).  @min_n is the minimum number of registers required in
 * the list; fewer is UNPREDICTABLE and we choose to UNDEF.
 */
static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
{
    int i, j, n, list, mem_idx;
    bool user = a->u;
    TCGv_i32 addr, tmp, tmp2;

    if (user) {
        /* STM (user) */
        if (IS_USER(s)) {
            /* Only usable in supervisor mode.  */
            unallocated_encoding(s);
            return true;
        }
    }

    list = a->list;
    n = ctpop16(list);
    /* Reject too-short lists and rn == pc. */
    if (n < min_n || a->rn == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = op_addr_block_pre(s, a, n);
    mem_idx = get_mem_index(s);

    /* i indexes the register number; j counts transfers performed. */
    for (i = j = 0; i < 16; i++) {
        if (!(list & (1 << i))) {
            continue;
        }

        if (user && i != 15) {
            /* STM (user): store the user-mode (banked) copy via helper. */
            tmp = tcg_temp_new_i32();
            tmp2 = tcg_const_i32(i);
            gen_helper_get_user_reg(tmp, cpu_env, tmp2);
            tcg_temp_free_i32(tmp2);
        } else {
            tmp = load_reg(s, i);
        }
        gen_aa32_st32(s, tmp, addr, mem_idx);
        tcg_temp_free_i32(tmp);

        /* No need to add after the last transfer.  */
        if (++j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    op_addr_block_post(s, a, addr, n);
    return true;
}
/* STM (A32): any non-empty register list is accepted. */
static bool trans_STM(DisasContext *s, arg_ldst_block *a)
{
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return op_stm(s, a, 1);
}

/* STM (T32): stricter constraints than the A32 encoding. */
static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback register in register list is UNPREDICTABLE for T32.  */
    if (a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 2 is UNPREDICTABLE */
    return op_stm(s, a, 2);
}
/*
 * Common translation for LDM and variants, including LDM (user) and
 * LDM (exception return).  @min_n is the minimum number of registers
 * required in the list; fewer is UNPREDICTABLE and we choose to UNDEF.
 */
static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
{
    int i, j, n, list, mem_idx;
    bool loaded_base;
    bool user = a->u;
    bool exc_return = false;
    TCGv_i32 addr, tmp, tmp2, loaded_var;

    if (user) {
        /* LDM (user), LDM (exception return) */
        if (IS_USER(s)) {
            /* Only usable in supervisor mode.  */
            unallocated_encoding(s);
            return true;
        }
        if (extract32(a->list, 15, 1)) {
            /* PC in the list with the S bit: exception return form. */
            exc_return = true;
            user = false;
        } else {
            /* LDM (user) does not allow writeback.  */
            if (a->w) {
                unallocated_encoding(s);
                return true;
            }
        }
    }

    list = a->list;
    n = ctpop16(list);
    if (n < min_n || a->rn == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = op_addr_block_pre(s, a, n);
    mem_idx = get_mem_index(s);
    loaded_base = false;
    loaded_var = NULL;

    /* i indexes the register number; j counts transfers performed. */
    for (i = j = 0; i < 16; i++) {
        if (!(list & (1 << i))) {
            continue;
        }

        tmp = tcg_temp_new_i32();
        gen_aa32_ld32u(s, tmp, addr, mem_idx);
        if (user) {
            /* LDM (user): write the user-mode (banked) copy via helper. */
            tmp2 = tcg_const_i32(i);
            gen_helper_set_user_reg(cpu_env, tmp2, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
        } else if (i == a->rn) {
            /* Defer the base register update until after writeback. */
            loaded_var = tmp;
            loaded_base = true;
        } else if (i == 15 && exc_return) {
            store_pc_exc_ret(s, tmp);
        } else {
            store_reg_from_load(s, i, tmp);
        }

        /* No need to add after the last transfer.  */
        if (++j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    op_addr_block_post(s, a, addr, n);

    if (loaded_base) {
        /* Note that we reject base == pc above.  */
        store_reg(s, a->rn, loaded_var);
    }

    if (exc_return) {
        /* Restore CPSR from SPSR.  */
        tmp = load_cpu_field(spsr);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_cpsr_write_eret(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
/* LDM (A32). */
static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
{
    /*
     * Writeback register in register list is UNPREDICTABLE
     * for ArchVersion() >= 7.  Prior to v7, A32 would write
     * an UNKNOWN value to the base register.
     */
    if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return do_ldm(s, a, 1);
}

/* LDM (T32): stricter constraints than the A32 encoding. */
static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback register in register list is UNPREDICTABLE for T32.  */
    if (a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 2 is UNPREDICTABLE */
    return do_ldm(s, a, 2);
}

/* LDM (16-bit Thumb). */
static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback is conditional on the base register not being loaded.  */
    a->w = !(a->list & (1 << a->rn));
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return do_ldm(s, a, 1);
}
  7015. /*
  7016. * Branch, branch with link
  7017. */
/* B: unconditional branch to pc + imm. */
static bool trans_B(DisasContext *s, arg_i *a)
{
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
/* B<cond> (Thumb): conditional branch with the condition in the encoding. */
static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
{
    /* This has cond from encoding, required to be outside IT block.  */
    if (a->cond >= 0xe) {
        /* Condition 0xe/0xf: not this encoding; let the decoder try others. */
        return false;
    }
    if (s->condexec_mask) {
        /* Inside an IT block: UNPREDICTABLE; we choose to UNDEF. */
        unallocated_encoding(s);
        return true;
    }

    arm_skip_unless(s, a->cond);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
/* BL: branch with link; lr gets the return address with the Thumb bit. */
static bool trans_BL(DisasContext *s, arg_i *a)
{
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
/* BLX (immediate): branch with link and exchange instruction set. */
static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
{
    TCGv_i32 tmp;

    /* For A32, ARCH(5) is checked near the start of the uncond block.  */
    if (s->thumb && (a->imm & 2)) {
        return false;
    }

    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    /* Toggle the execution state: BLX always switches Thumb <-> ARM. */
    tmp = tcg_const_i32(!s->thumb);
    store_cpu_field(tmp, thumb);
    /* Target is computed from the 4-byte-aligned PC. */
    gen_jmp(s, (read_pc(s) & ~3) + a->imm);
    return true;
}
/*
 * Thumb-1 split BL/BLX, first half: stash the upper offset bits in lr.
 * Only reachable on cores without Thumb2 (which treat the pair as one
 * 32-bit insn).
 */
static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
{
    assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
    tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
    return true;
}

/*
 * Thumb-1 split BL, second half: combine lr with the lower offset bits,
 * set the return address in lr, and branch.
 */
static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
    tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
    gen_bx(s, tmp);
    return true;
}
/*
 * Thumb-1 split BLX, second half: combine lr with the lower offset
 * bits, force 4-byte alignment (the target is ARM state), set the
 * return address, and branch-and-exchange.
 */
static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
{
    TCGv_i32 tmp;

    assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
    if (!ENABLE_ARCH_5) {
        return false;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
    tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
    gen_bx(s, tmp);
    return true;
}
/*
 * Common translation for TBB/TBH: load a byte (or halfword when @half)
 * offset from the table at rn indexed by rm, and branch forward by
 * twice that offset from the current PC.
 */
static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
{
    TCGv_i32 addr, tmp;

    tmp = load_reg(s, a->rm);
    if (half) {
        /* Halfword table: scale the index by 2. */
        tcg_gen_add_i32(tmp, tmp, tmp);
    }
    addr = load_reg(s, a->rn);
    tcg_gen_add_i32(addr, addr, tmp);

    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                    half ? MO_UW | s->be_data : MO_UB);
    tcg_temp_free_i32(addr);

    /* The loaded offset is in halfwords: double it, then add to PC. */
    tcg_gen_add_i32(tmp, tmp, tmp);
    tcg_gen_addi_i32(tmp, tmp, read_pc(s));
    store_reg(s, 15, tmp);
    return true;
}
/* TBB: table branch with byte offsets. */
static bool trans_TBB(DisasContext *s, arg_tbranch *a)
{
    return op_tbranch(s, a, false);
}

/* TBH: table branch with halfword offsets. */
static bool trans_TBH(DisasContext *s, arg_tbranch *a)
{
    return op_tbranch(s, a, true);
}
/* CBZ/CBNZ: compare rn against zero and branch (a->nz selects CBNZ). */
static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
{
    TCGv_i32 tmp = load_reg(s, a->rn);

    arm_gen_condlabel(s);
    /* Fall through (skip the branch) when the condition fails. */
    tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
                        tmp, 0, s->condlabel);
    tcg_temp_free_i32(tmp);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
  7120. /*
  7121. * Supervisor call - both T32 & A32 come here so we need to check
  7122. * which mode we are in when checking for semihosting.
  7123. */
/*
 * SVC: supervisor call.  When semihosting is enabled and the immediate
 * matches the per-ISA semihosting value, raise the internal semihosting
 * exception instead of a real SWI.
 */
static bool trans_SVC(DisasContext *s, arg_SVC *a)
{
    /* Semihosting immediate: 0xab for Thumb, 0x123456 for A32. */
    const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;

    if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        !IS_USER(s) &&
#endif
        (a->imm == semihost_imm)) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
    } else {
        gen_set_pc_im(s, s->base.pc_next);
        s->svc_imm = a->imm;
        s->base.is_jmp = DISAS_SWI;
    }
    return true;
}
  7140. /*
  7141. * Unconditional system instructions
  7142. */
/*
 * RFE: return from exception.  Loads PC and CPSR from memory at rn,
 * adjusted by the addressing mode (a->pu indexes DA/IA/DB/IB), with
 * optional base writeback.
 */
static bool trans_RFE(DisasContext *s, arg_RFE *a)
{
    /* Offset applied to rn before the first load, indexed by a->pu. */
    static const int8_t pre_offset[4] = {
        /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
    };
    /* Writeback adjustment applied after the loads, indexed by a->pu. */
    static const int8_t post_offset[4] = {
        /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
    };
    TCGv_i32 addr, t1, t2;

    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);

    /* Load PC into tmp and CPSR into tmp2.  */
    t1 = tcg_temp_new_i32();
    gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
    tcg_gen_addi_i32(addr, addr, 4);
    t2 = tcg_temp_new_i32();
    gen_aa32_ld32u(s, t2, addr, get_mem_index(s));

    if (a->w) {
        /* Base writeback.  */
        tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    gen_rfe(s, t1, t2);
    return true;
}
/* SRS: store return state (lr and SPSR) to the stack of a given mode. */
static bool trans_SRS(DisasContext *s, arg_SRS *a)
{
    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_srs(s, a->mode, a->pu, a->w);
    return true;
}
/*
 * CPS (A/R profile): change processor state -- the A/I/F interrupt
 * mask bits and/or the mode field of CPSR.
 */
static bool trans_CPS(DisasContext *s, arg_CPS *a)
{
    uint32_t mask, val;

    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        /* Implemented as NOP in user mode.  */
        return true;
    }

    /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
    mask = val = 0;
    if (a->imod & 2) {
        /* imod bit 1 set: the A/I/F bits are being changed. */
        if (a->A) {
            mask |= CPSR_A;
        }
        if (a->I) {
            mask |= CPSR_I;
        }
        if (a->F) {
            mask |= CPSR_F;
        }
        if (a->imod & 1) {
            /* imod bit 0: set the selected bits (CPSID); clear otherwise. */
            val |= mask;
        }
    }
    if (a->M) {
        /* Also switch CPU mode. */
        mask |= CPSR_M;
        val |= a->mode;
    }
    if (mask) {
        gen_set_psr_im(s, mask, 0, val);
    }
    return true;
}
/*
 * CPS (M profile): write a->im to PRIMASK and/or FAULTMASK via the
 * v7m_msr helper, then rebuild hflags and force a TB lookup since the
 * execution state may have changed.
 */
static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
{
    TCGv_i32 tmp, addr, el;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        /* Implemented as NOP in user mode.  */
        return true;
    }

    tmp = tcg_const_i32(a->im);
    /* FAULTMASK (system register number 19). */
    if (a->F) {
        addr = tcg_const_i32(19);
        gen_helper_v7m_msr(cpu_env, addr, tmp);
        tcg_temp_free_i32(addr);
    }
    /* PRIMASK (system register number 16). */
    if (a->I) {
        addr = tcg_const_i32(16);
        gen_helper_v7m_msr(cpu_env, addr, tmp);
        tcg_temp_free_i32(addr);
    }

    el = tcg_const_i32(s->current_el);
    gen_helper_rebuild_hflags_m32(cpu_env, el);
    tcg_temp_free_i32(el);
    tcg_temp_free_i32(tmp);
    gen_lookup_tb(s);
    return true;
}
  7250. /*
  7251. * Clear-Exclusive, Barriers
  7252. */
/* CLREX: clear the local exclusive monitor. */
static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    /* T32 requires v7 or M profile; A32 requires v6K. */
    if (s->thumb
        ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
        : !ENABLE_ARCH_6K) {
        return false;
    }
    gen_clrex(s);
    return true;
}
/* DSB: data synchronization barrier; emitted as a full TCG barrier. */
static bool trans_DSB(DisasContext *s, arg_DSB *a)
{
    if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

/* DMB: treated identically to DSB here (a is unused by trans_DSB). */
static bool trans_DMB(DisasContext *s, arg_DMB *a)
{
    return trans_DSB(s, NULL);
}
/* ISB: instruction synchronization barrier. */
static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    gen_goto_tb(s, 0, s->base.pc_next);
    return true;
}
/* SB: speculation barrier (ARMv8.0-SB extension). */
static bool trans_SB(DisasContext *s, arg_SB *a)
{
    if (!dc_isar_feature(aa32_sb, s)) {
        return false;
    }
    /*
     * TODO: There is no speculation barrier opcode
     * for TCG; MB and end the TB instead.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_goto_tb(s, 0, s->base.pc_next);
    return true;
}
/* SETEND: set data endianness; only emit work if the state changes. */
static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    if (a->E != (s->be_data == MO_BE)) {
        gen_helper_setend(cpu_env);
        /* Endianness changed: end the TB so new code uses the new state. */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    return true;
}
  7312. /*
  7313. * Preload instructions
  7314. * All are nops, contingent on the appropriate arch level.
  7315. */
/* PLD: preload data; a NOP for TCG, valid from v5TE. */
static bool trans_PLD(DisasContext *s, arg_PLD *a)
{
    return ENABLE_ARCH_5TE;
}

/* PLDW: preload data for write; a NOP, requires the MP extensions. */
static bool trans_PLDW(DisasContext *s, arg_PLD *a)
{
    return arm_dc_feature(s, ARM_FEATURE_V7MP);
}

/* PLI: preload instructions; a NOP, valid from v7. */
static bool trans_PLI(DisasContext *s, arg_PLD *a)
{
    return ENABLE_ARCH_7;
}
  7328. /*
  7329. * If-then
  7330. */
/* IT: set up the IT-block conditional-execution state. */
static bool trans_IT(DisasContext *s, arg_IT *a)
{
    int cond_mask = a->cond_mask;

    /*
     * No actual code generated for this insn, just setup state.
     *
     * Combinations of firstcond and mask which set up an 0b1111
     * condition are UNPREDICTABLE; we take the CONSTRAINED
     * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
     * i.e. both meaning "execute always".
     */
    s->condexec_cond = (cond_mask >> 4) & 0xe;
    s->condexec_mask = cond_mask & 0x1f;
    return true;
}
  7346. /*
  7347. * Legacy decoder.
  7348. */
/*
 * Translate one A32 instruction: try the decodetree decoders first,
 * then fall back to the legacy coprocessor/iWMMXt paths.
 */
static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
    unsigned int cond = insn >> 28;

    /* M variants do not implement ARM mode; this must raise the INVSTATE
     * UsageFault exception.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    if (cond == 0xf) {
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);

        /* Unconditional instructions.  */
        /* TODO: Perhaps merge these into one decodetree output file.  */
        if (disas_a32_uncond(s, insn) ||
            disas_vfp_uncond(s, insn) ||
            disas_neon_dp(s, insn) ||
            disas_neon_ls(s, insn) ||
            disas_neon_shared(s, insn)) {
            return;
        }
        /* fall back to legacy decoder */

        if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (extract32(s->c15_cpar, 1, 1)) {
                    if (!disas_iwmmxt_insn(s, insn)) {
                        return;
                    }
                }
            }
        }
        goto illegal_op;
    }

    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        arm_skip_unless(s, cond);
    }

    /* TODO: Perhaps merge these into one decodetree output file.  */
    if (disas_a32(s, insn) ||
        disas_vfp(s, insn)) {
        return;
    }
    /* fall back to legacy decoder */

    switch ((insn >> 24) & 0xf) {
    case 0xc:
    case 0xd:
    case 0xe:
        if (((insn >> 8) & 0xe) == 10) {
            /* VFP, but failed disas_vfp.  */
            goto illegal_op;
        }
        if (disas_coproc_insn(s, insn)) {
            /* Coprocessor.  */
            goto illegal_op;
        }
        break;
    default:
    illegal_op:
        unallocated_encoding(s);
        break;
    }
}
/*
 * Decide whether the halfword @insn at @pc is a complete 16-bit
 * instruction, or the first half of a 32-bit one.
 */
static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
    /*
     * Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }

    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
     * first half of a 32-bit Thumb insn. Thumb-1 cores might
     * end up actually treating this as two 16-bit insns, though,
     * if it's half of a bl/blx pair that might span a page boundary.
     */
    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Thumb2 cores (including all M profile ones) always treat
         * 32-bit insns as 32-bit.
         */
        return false;
    }

    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
         * is not on the next page; we merge this into a 32-bit
         * insn.
         */
        return false;
    }

    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
     *  -- handle as single 16 bit insn
     */
    return true;
}
/* Translate a 32-bit thumb instruction. */
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
{
    /*
     * ARMv6-M supports a limited subset of Thumb2 instructions.
     * Other Thumb1 architectures allow only 32-bit
     * combined BL/BLX prefix and suffix.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_V7)) {
        int i;
        bool found = false;
        /*
         * Whitelist of the few 32-bit encodings that v6-M permits;
         * each pattern is matched under the corresponding mask below.
         */
        static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                               0xf3b08040 /* dsb */,
                                               0xf3b08050 /* dmb */,
                                               0xf3b08060 /* isb */,
                                               0xf3e08000 /* mrs */,
                                               0xf000d000 /* bl */};
        static const uint32_t armv6m_mask[] = {0xffe0d000,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xffe0d000,
                                               0xf800d000};

        for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
            if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
                found = true;
                break;
            }
        }
        if (!found) {
            goto illegal_op;
        }
    } else if ((insn & 0xf800e800) != 0xf000e800) {
        /* Anything that is not a BL/BLX prefix+suffix pair needs Thumb-2. */
        ARCH(6T2);
    }

    if ((insn & 0xef000000) == 0xef000000) {
        /*
         * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
         * transform into
         * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
         */
        uint32_t a32_insn = (insn & 0xe2ffffff) |
            ((insn & (1 << 28)) >> 4) | (1 << 28);

        if (disas_neon_dp(s, a32_insn)) {
            return;
        }
    }

    if ((insn & 0xff100000) == 0xf9000000) {
        /*
         * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
         * transform into
         * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
         */
        uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;

        if (disas_neon_ls(s, a32_insn)) {
            return;
        }
    }

    /*
     * TODO: Perhaps merge these into one decodetree output file.
     * Note disas_vfp is written for a32 with cond field in the
     * top nibble. The t32 encoding requires 0xe in the top nibble.
     */
    if (disas_t32(s, insn) ||
        disas_vfp_uncond(s, insn) ||
        disas_neon_shared(s, insn) ||
        ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
        return;
    }
    /* fall back to legacy decoder */
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions. Should never happen. */
        abort();
    case 6: case 7: case 14: case 15:
        /* Coprocessor. */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
            if (extract32(insn, 24, 2) == 3) {
                goto illegal_op; /* op0 = 0b11 : unallocated */
            }
            if (((insn >> 8) & 0xe) == 10 &&
                dc_isar_feature(aa32_fpsp_v2, s)) {
                /* FP, and the CPU supports it */
                goto illegal_op;
            } else {
                /* All other insns: NOCP */
                gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                                   syn_uncategorized(),
                                   default_exception_el(s));
            }
            break;
        }
        if (((insn >> 24) & 3) == 3) {
            /* Neon DP, but failed disas_neon_dp() */
            goto illegal_op;
        } else if (((insn >> 8) & 0xe) == 10) {
            /* VFP, but failed disas_vfp. */
            goto illegal_op;
        } else {
            /* T32 coprocessor insns require bit 28 clear. */
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
    case 12:
        goto illegal_op;
    default:
    illegal_op:
        unallocated_encoding(s);
    }
}
  7569. static void disas_thumb_insn(DisasContext *s, uint32_t insn)
  7570. {
  7571. if (!disas_t16(s, insn)) {
  7572. unallocated_encoding(s);
  7573. }
  7574. }
  7575. static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
  7576. {
  7577. /* Return true if the insn at dc->base.pc_next might cross a page boundary.
  7578. * (False positives are OK, false negatives are not.)
  7579. * We know this is a Thumb insn, and our caller ensures we are
  7580. * only called if dc->base.pc_next is less than 4 bytes from the page
  7581. * boundary, so we cross the page if the first 16 bits indicate
  7582. * that this is a 32 bit insn.
  7583. */
  7584. uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
  7585. return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
  7586. }
  7587. static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
  7588. {
  7589. DisasContext *dc = container_of(dcbase, DisasContext, base);
  7590. CPUARMState *env = cs->env_ptr;
  7591. ARMCPU *cpu = env_archcpu(env);
  7592. uint32_t tb_flags = dc->base.tb->flags;
  7593. uint32_t condexec, core_mmu_idx;
  7594. dc->isar = &cpu->isar;
  7595. dc->condjmp = 0;
  7596. dc->aarch64 = 0;
  7597. /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
  7598. * there is no secure EL1, so we route exceptions to EL3.
  7599. */
  7600. dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
  7601. !arm_el_is_aa64(env, 3);
  7602. dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
  7603. dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
  7604. condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
  7605. dc->condexec_mask = (condexec & 0xf) << 1;
  7606. dc->condexec_cond = condexec >> 4;
  7607. core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
  7608. dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
  7609. dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
  7610. #if !defined(CONFIG_USER_ONLY)
  7611. dc->user = (dc->current_el == 0);
  7612. #endif
  7613. dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
  7614. if (arm_feature(env, ARM_FEATURE_M)) {
  7615. dc->vfp_enabled = 1;
  7616. dc->be_data = MO_TE;
  7617. dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
  7618. dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
  7619. regime_is_secure(env, dc->mmu_idx);
  7620. dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
  7621. dc->v8m_fpccr_s_wrong =
  7622. FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
  7623. dc->v7m_new_fp_ctxt_needed =
  7624. FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
  7625. dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
  7626. } else {
  7627. dc->be_data =
  7628. FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
  7629. dc->debug_target_el =
  7630. FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
  7631. dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
  7632. dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
  7633. dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
  7634. dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
  7635. if (arm_feature(env, ARM_FEATURE_XSCALE)) {
  7636. dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
  7637. } else {
  7638. dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
  7639. dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
  7640. }
  7641. }
  7642. dc->cp_regs = cpu->cp_regs;
  7643. dc->features = env->features;
  7644. /* Single step state. The code-generation logic here is:
  7645. * SS_ACTIVE == 0:
  7646. * generate code with no special handling for single-stepping (except
  7647. * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
  7648. * this happens anyway because those changes are all system register or
  7649. * PSTATE writes).
  7650. * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
  7651. * emit code for one insn
  7652. * emit code to clear PSTATE.SS
  7653. * emit code to generate software step exception for completed step
  7654. * end TB (as usual for having generated an exception)
  7655. * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
  7656. * emit code to generate a software step exception
  7657. * end the TB
  7658. */
  7659. dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
  7660. dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
  7661. dc->is_ldex = false;
  7662. dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
  7663. /* If architectural single step active, limit to 1. */
  7664. if (is_singlestepping(dc)) {
  7665. dc->base.max_insns = 1;
  7666. }
  7667. /* ARM is a fixed-length ISA. Bound the number of insns to execute
  7668. to those left on the page. */
  7669. if (!dc->thumb) {
  7670. int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
  7671. dc->base.max_insns = MIN(dc->base.max_insns, bound);
  7672. }
  7673. cpu_V0 = tcg_temp_new_i64();
  7674. cpu_V1 = tcg_temp_new_i64();
  7675. /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
  7676. cpu_M0 = tcg_temp_new_i64();
  7677. }
  7678. static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
  7679. {
  7680. DisasContext *dc = container_of(dcbase, DisasContext, base);
  7681. /* A note on handling of the condexec (IT) bits:
  7682. *
  7683. * We want to avoid the overhead of having to write the updated condexec
  7684. * bits back to the CPUARMState for every instruction in an IT block. So:
  7685. * (1) if the condexec bits are not already zero then we write
  7686. * zero back into the CPUARMState now. This avoids complications trying
  7687. * to do it at the end of the block. (For example if we don't do this
  7688. * it's hard to identify whether we can safely skip writing condexec
  7689. * at the end of the TB, which we definitely want to do for the case
  7690. * where a TB doesn't do anything with the IT state at all.)
  7691. * (2) if we are going to leave the TB then we call gen_set_condexec()
  7692. * which will write the correct value into CPUARMState if zero is wrong.
  7693. * This is done both for leaving the TB at the end, and for leaving
  7694. * it because of an exception we know will happen, which is done in
  7695. * gen_exception_insn(). The latter is necessary because we need to
  7696. * leave the TB with the PC/IT state just prior to execution of the
  7697. * instruction which caused the exception.
  7698. * (3) if we leave the TB unexpectedly (eg a data abort on a load)
  7699. * then the CPUARMState will be wrong and we need to reset it.
  7700. * This is handled in the same way as restoration of the
  7701. * PC in these situations; we save the value of the condexec bits
  7702. * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
  7703. * then uses this to restore them after an exception.
  7704. *
  7705. * Note that there are no instructions which can read the condexec
  7706. * bits, and none which can write non-static values to them, so
  7707. * we don't need to care about whether CPUARMState is correct in the
  7708. * middle of a TB.
  7709. */
  7710. /* Reset the conditional execution bits immediately. This avoids
  7711. complications trying to do it at the end of the block. */
  7712. if (dc->condexec_mask || dc->condexec_cond) {
  7713. TCGv_i32 tmp = tcg_temp_new_i32();
  7714. tcg_gen_movi_i32(tmp, 0);
  7715. store_cpu_field(tmp, condexec_bits);
  7716. }
  7717. }
  7718. static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
  7719. {
  7720. DisasContext *dc = container_of(dcbase, DisasContext, base);
  7721. tcg_gen_insn_start(dc->base.pc_next,
  7722. (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
  7723. 0);
  7724. dc->insn_start = tcg_last_op();
  7725. }
  7726. static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
  7727. const CPUBreakpoint *bp)
  7728. {
  7729. DisasContext *dc = container_of(dcbase, DisasContext, base);
  7730. if (bp->flags & BP_CPU) {
  7731. gen_set_condexec(dc);
  7732. gen_set_pc_im(dc, dc->base.pc_next);
  7733. gen_helper_check_breakpoints(cpu_env);
  7734. /* End the TB early; it's likely not going to be executed */
  7735. dc->base.is_jmp = DISAS_TOO_MANY;
  7736. } else {
  7737. gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
  7738. /* The address covered by the breakpoint must be
  7739. included in [tb->pc, tb->pc + tb->size) in order
  7740. to for it to be properly cleared -- thus we
  7741. increment the PC here so that the logic setting
  7742. tb->size below does the right thing. */
  7743. /* TODO: Advance PC by correct instruction length to
  7744. * avoid disassembler error messages */
  7745. dc->base.pc_next += 2;
  7746. dc->base.is_jmp = DISAS_NORETURN;
  7747. }
  7748. return true;
  7749. }
  7750. static bool arm_pre_translate_insn(DisasContext *dc)
  7751. {
  7752. #ifdef CONFIG_USER_ONLY
  7753. /* Intercept jump to the magic kernel page. */
  7754. if (dc->base.pc_next >= 0xffff0000) {
  7755. /* We always get here via a jump, so know we are not in a
  7756. conditional execution block. */
  7757. gen_exception_internal(EXCP_KERNEL_TRAP);
  7758. dc->base.is_jmp = DISAS_NORETURN;
  7759. return true;
  7760. }
  7761. #endif
  7762. if (dc->ss_active && !dc->pstate_ss) {
  7763. /* Singlestep state is Active-pending.
  7764. * If we're in this state at the start of a TB then either
  7765. * a) we just took an exception to an EL which is being debugged
  7766. * and this is the first insn in the exception handler
  7767. * b) debug exceptions were masked and we just unmasked them
  7768. * without changing EL (eg by clearing PSTATE.D)
  7769. * In either case we're going to take a swstep exception in the
  7770. * "did not step an insn" case, and so the syndrome ISV and EX
  7771. * bits should be zero.
  7772. */
  7773. assert(dc->base.num_insns == 1);
  7774. gen_swstep_exception(dc, 0, 0);
  7775. dc->base.is_jmp = DISAS_NORETURN;
  7776. return true;
  7777. }
  7778. return false;
  7779. }
  7780. static void arm_post_translate_insn(DisasContext *dc)
  7781. {
  7782. if (dc->condjmp && !dc->base.is_jmp) {
  7783. gen_set_label(dc->condlabel);
  7784. dc->condjmp = 0;
  7785. }
  7786. translator_loop_temp_check(&dc->base);
  7787. }
  7788. static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
  7789. {
  7790. DisasContext *dc = container_of(dcbase, DisasContext, base);
  7791. CPUARMState *env = cpu->env_ptr;
  7792. unsigned int insn;
  7793. if (arm_pre_translate_insn(dc)) {
  7794. return;
  7795. }
  7796. dc->pc_curr = dc->base.pc_next;
  7797. insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
  7798. dc->insn = insn;
  7799. dc->base.pc_next += 4;
  7800. disas_arm_insn(dc, insn);
  7801. arm_post_translate_insn(dc);
  7802. /* ARM is a fixed-length ISA. We performed the cross-page check
  7803. in init_disas_context by adjusting max_insns. */
  7804. }
  7805. static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
  7806. {
  7807. /* Return true if this Thumb insn is always unconditional,
  7808. * even inside an IT block. This is true of only a very few
  7809. * instructions: BKPT, HLT, and SG.
  7810. *
  7811. * A larger class of instructions are UNPREDICTABLE if used
  7812. * inside an IT block; we do not need to detect those here, because
  7813. * what we do by default (perform the cc check and update the IT
  7814. * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
  7815. * choice for those situations.
  7816. *
  7817. * insn is either a 16-bit or a 32-bit instruction; the two are
  7818. * distinguishable because for the 16-bit case the top 16 bits
  7819. * are zeroes, and that isn't a valid 32-bit encoding.
  7820. */
  7821. if ((insn & 0xffffff00) == 0xbe00) {
  7822. /* BKPT */
  7823. return true;
  7824. }
  7825. if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
  7826. !arm_dc_feature(s, ARM_FEATURE_M)) {
  7827. /* HLT: v8A only. This is unconditional even when it is going to
  7828. * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
  7829. * For v7 cores this was a plain old undefined encoding and so
  7830. * honours its cc check. (We might be using the encoding as
  7831. * a semihosting trap, but we don't change the cc check behaviour
  7832. * on that account, because a debugger connected to a real v7A
  7833. * core and emulating semihosting traps by catching the UNDEF
  7834. * exception would also only see cases where the cc check passed.
  7835. * No guest code should be trying to do a HLT semihosting trap
  7836. * in an IT block anyway.
  7837. */
  7838. return true;
  7839. }
  7840. if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
  7841. arm_dc_feature(s, ARM_FEATURE_M)) {
  7842. /* SG: v8M only */
  7843. return true;
  7844. }
  7845. return false;
  7846. }
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    /* Handles the commpage trap and pending software-step cases. */
    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        /* First half of a 32-bit insn: fetch and append the second half. */
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, most insns execute conditionally. */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        /* Shift the next state bit into the condition's low bit, then
         * consume one bit of the mask; an empty mask ends the IT block.
         */
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
/*
 * Emit the end-of-TB code: write back PC/condexec state and dispatch on
 * base.is_jmp. Note the switch statements deliberately place some case
 * labels after "default:" so they can share (or skip) the fallthrough
 * paths -- do not reorder them.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE_EXIT:
        case DISAS_UPDATE_NOCHAIN:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            /* An exception has already been generated; nothing to do. */
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            /* PC is already up to date; chain via the TB jump cache. */
            gen_goto_ptr();
            break;
        case DISAS_UPDATE_EXIT:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Pass the insn length so the helper can back the PC up
             * correctly if the WFI is trapped.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}
  8030. static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
  8031. {
  8032. DisasContext *dc = container_of(dcbase, DisasContext, base);
  8033. qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
  8034. log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
  8035. }
/* Translator hooks for A32 (fixed-length 32-bit) instruction mode. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
/* Translator hooks for Thumb mode; only translate_insn differs from A32. */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
  8054. /* generate intermediate code for basic block 'tb'. */
  8055. void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
  8056. {
  8057. DisasContext dc = { };
  8058. const TranslatorOps *ops = &arm_translator_ops;
  8059. if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
  8060. ops = &thumb_translator_ops;
  8061. }
  8062. #ifdef TARGET_AARCH64
  8063. if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
  8064. ops = &aarch64_translator_ops;
  8065. }
  8066. #endif
  8067. translator_loop(ops, &dc.base, cpu, tb, max_insns);
  8068. }
  8069. void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
  8070. target_ulong *data)
  8071. {
  8072. if (is_a64(env)) {
  8073. env->pc = data[0];
  8074. env->condexec_bits = 0;
  8075. env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
  8076. } else {
  8077. env->regs[15] = data[0];
  8078. env->condexec_bits = data[1];
  8079. env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
  8080. }
  8081. }