- /*
- * Tiny Code Generator for QEMU
- *
- * Copyright (c) 2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
- #include "elf.h"
- #include "../tcg-pool.c.inc"
- #if defined _CALL_DARWIN || defined __APPLE__
- #define TCG_TARGET_CALL_DARWIN
- #endif
- #ifdef _CALL_SYSV
- # define TCG_TARGET_CALL_ALIGN_ARGS 1
- #endif
- /* For some memory operations, we need a scratch that isn't R0. For the AIX
- calling convention, we can re-use the TOC register since we'll be reloading
- it at every call. Otherwise R12 will do nicely as neither a call-saved
- register nor a parameter register. */
- #ifdef _CALL_AIX
- # define TCG_REG_TMP1 TCG_REG_R2
- #else
- # define TCG_REG_TMP1 TCG_REG_R12
- #endif
- #define TCG_VEC_TMP1 TCG_REG_V0
- #define TCG_VEC_TMP2 TCG_REG_V1
- #define TCG_REG_TB TCG_REG_R31
- #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
- /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
- #define SZP ((int)sizeof(void *))
- /* Shorthand for size of a register. */
- #define SZR (TCG_TARGET_REG_BITS / 8)
- #define TCG_CT_CONST_S16 0x100
- #define TCG_CT_CONST_U16 0x200
- #define TCG_CT_CONST_S32 0x400
- #define TCG_CT_CONST_U32 0x800
- #define TCG_CT_CONST_ZERO 0x1000
- #define TCG_CT_CONST_MONE 0x2000
- #define TCG_CT_CONST_WSZ 0x4000
- static tcg_insn_unit *tb_ret_addr;
- TCGPowerISA have_isa;
- static bool have_isel;
- bool have_altivec;
- bool have_vsx;
- #ifndef CONFIG_SOFTMMU
- #define TCG_GUEST_BASE_REG 30
- #endif
- #ifdef CONFIG_DEBUG_TCG
- static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
- "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
- "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
- "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
- };
- #endif
- static const int tcg_target_reg_alloc_order[] = {
- TCG_REG_R14, /* call saved registers */
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17,
- TCG_REG_R18,
- TCG_REG_R19,
- TCG_REG_R20,
- TCG_REG_R21,
- TCG_REG_R22,
- TCG_REG_R23,
- TCG_REG_R24,
- TCG_REG_R25,
- TCG_REG_R26,
- TCG_REG_R27,
- TCG_REG_R28,
- TCG_REG_R29,
- TCG_REG_R30,
- TCG_REG_R31,
- TCG_REG_R12, /* call clobbered, non-arguments */
- TCG_REG_R11,
- TCG_REG_R2,
- TCG_REG_R13,
- TCG_REG_R10, /* call clobbered, arguments */
- TCG_REG_R9,
- TCG_REG_R8,
- TCG_REG_R7,
- TCG_REG_R6,
- TCG_REG_R5,
- TCG_REG_R4,
- TCG_REG_R3,
- /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
- TCG_REG_V2, /* call clobbered, vectors */
- TCG_REG_V3,
- TCG_REG_V4,
- TCG_REG_V5,
- TCG_REG_V6,
- TCG_REG_V7,
- TCG_REG_V8,
- TCG_REG_V9,
- TCG_REG_V10,
- TCG_REG_V11,
- TCG_REG_V12,
- TCG_REG_V13,
- TCG_REG_V14,
- TCG_REG_V15,
- TCG_REG_V16,
- TCG_REG_V17,
- TCG_REG_V18,
- TCG_REG_V19,
- };
- static const int tcg_target_call_iarg_regs[] = {
- TCG_REG_R3,
- TCG_REG_R4,
- TCG_REG_R5,
- TCG_REG_R6,
- TCG_REG_R7,
- TCG_REG_R8,
- TCG_REG_R9,
- TCG_REG_R10
- };
- static const int tcg_target_call_oarg_regs[] = {
- TCG_REG_R3,
- TCG_REG_R4
- };
- static const int tcg_target_callee_save_regs[] = {
- #ifdef TCG_TARGET_CALL_DARWIN
- TCG_REG_R11,
- #endif
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17,
- TCG_REG_R18,
- TCG_REG_R19,
- TCG_REG_R20,
- TCG_REG_R21,
- TCG_REG_R22,
- TCG_REG_R23,
- TCG_REG_R24,
- TCG_REG_R25,
- TCG_REG_R26,
- TCG_REG_R27, /* currently used for the global env */
- TCG_REG_R28,
- TCG_REG_R29,
- TCG_REG_R30,
- TCG_REG_R31
- };
- static inline bool in_range_b(tcg_target_long target)
- {
- return target == sextract64(target, 0, 26);
- }
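- /* Note: the unconditional branch I-form has a 24-bit LI field holding a
-    word offset, so the reachable byte displacement is a signed 26-bit
-    value (the low 2 bits are always zero), i.e. roughly +/- 32 MiB. */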
- static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
- {
- ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
- tcg_debug_assert(in_range_b(disp));
- return disp & 0x3fffffc;
- }
- static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
- {
- ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
- if (in_range_b(disp)) {
- *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc);
- return true;
- }
- return false;
- }
- static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
- {
- ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
- tcg_debug_assert(disp == (int16_t) disp);
- return disp & 0xfffc;
- }
- static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
- {
- ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
- if (disp == (int16_t) disp) {
- *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
- return true;
- }
- return false;
- }
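- /* Note: conditional branches (B-form) carry only a 14-bit word offset,
-    so the byte displacement must fit in a signed 16-bit value with the
-    low 2 bits clear, hence the (int16_t) check and the 0xfffc mask. */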
- /* parse target specific constraints */
- static const char *target_parse_constraint(TCGArgConstraint *ct,
- const char *ct_str, TCGType type)
- {
- switch (*ct_str++) {
- case 'A': case 'B': case 'C': case 'D':
- ct->ct |= TCG_CT_REG;
- tcg_regset_set_reg(ct->u.regs, 3 + ct_str[-1] - 'A');
- break;
- case 'r':
- ct->ct |= TCG_CT_REG;
- ct->u.regs = 0xffffffff;
- break;
- case 'v':
- ct->ct |= TCG_CT_REG;
- ct->u.regs = 0xffffffff00000000ull;
- break;
- case 'L': /* qemu_ld constraint */
- ct->ct |= TCG_CT_REG;
- ct->u.regs = 0xffffffff;
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
- #ifdef CONFIG_SOFTMMU
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
- #endif
- break;
- case 'S': /* qemu_st constraint */
- ct->ct |= TCG_CT_REG;
- ct->u.regs = 0xffffffff;
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
- #ifdef CONFIG_SOFTMMU
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
- #endif
- break;
- case 'I':
- ct->ct |= TCG_CT_CONST_S16;
- break;
- case 'J':
- ct->ct |= TCG_CT_CONST_U16;
- break;
- case 'M':
- ct->ct |= TCG_CT_CONST_MONE;
- break;
- case 'T':
- ct->ct |= TCG_CT_CONST_S32;
- break;
- case 'U':
- ct->ct |= TCG_CT_CONST_U32;
- break;
- case 'W':
- ct->ct |= TCG_CT_CONST_WSZ;
- break;
- case 'Z':
- ct->ct |= TCG_CT_CONST_ZERO;
- break;
- default:
- return NULL;
- }
- return ct_str;
- }
- /* test if a constant matches the constraint */
- static int tcg_target_const_match(tcg_target_long val, TCGType type,
- const TCGArgConstraint *arg_ct)
- {
- int ct = arg_ct->ct;
- if (ct & TCG_CT_CONST) {
- return 1;
- }
- /* The only 32-bit constraint we use aside from
- TCG_CT_CONST is TCG_CT_CONST_S16. */
- if (type == TCG_TYPE_I32) {
- val = (int32_t)val;
- }
- if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
- return 1;
- } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
- return 1;
- } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
- return 1;
- } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
- return 1;
- } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
- return 1;
- } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
- return 1;
- } else if ((ct & TCG_CT_CONST_WSZ)
- && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
- return 1;
- }
- return 0;
- }
- #define OPCD(opc) ((opc)<<26)
- #define XO19(opc) (OPCD(19)|((opc)<<1))
- #define MD30(opc) (OPCD(30)|((opc)<<2))
- #define MDS30(opc) (OPCD(30)|((opc)<<1))
- #define XO31(opc) (OPCD(31)|((opc)<<1))
- #define XO58(opc) (OPCD(58)|(opc))
- #define XO62(opc) (OPCD(62)|(opc))
- #define VX4(opc) (OPCD(4)|(opc))
- #define B OPCD( 18)
- #define BC OPCD( 16)
- #define LBZ OPCD( 34)
- #define LHZ OPCD( 40)
- #define LHA OPCD( 42)
- #define LWZ OPCD( 32)
- #define LWZUX XO31( 55)
- #define STB OPCD( 38)
- #define STH OPCD( 44)
- #define STW OPCD( 36)
- #define STD XO62( 0)
- #define STDU XO62( 1)
- #define STDX XO31(149)
- #define LD XO58( 0)
- #define LDX XO31( 21)
- #define LDU XO58( 1)
- #define LDUX XO31( 53)
- #define LWA XO58( 2)
- #define LWAX XO31(341)
- #define ADDIC OPCD( 12)
- #define ADDI OPCD( 14)
- #define ADDIS OPCD( 15)
- #define ORI OPCD( 24)
- #define ORIS OPCD( 25)
- #define XORI OPCD( 26)
- #define XORIS OPCD( 27)
- #define ANDI OPCD( 28)
- #define ANDIS OPCD( 29)
- #define MULLI OPCD( 7)
- #define CMPLI OPCD( 10)
- #define CMPI OPCD( 11)
- #define SUBFIC OPCD( 8)
- #define LWZU OPCD( 33)
- #define STWU OPCD( 37)
- #define RLWIMI OPCD( 20)
- #define RLWINM OPCD( 21)
- #define RLWNM OPCD( 23)
- #define RLDICL MD30( 0)
- #define RLDICR MD30( 1)
- #define RLDIMI MD30( 3)
- #define RLDCL MDS30( 8)
- #define BCLR XO19( 16)
- #define BCCTR XO19(528)
- #define CRAND XO19(257)
- #define CRANDC XO19(129)
- #define CRNAND XO19(225)
- #define CROR XO19(449)
- #define CRNOR XO19( 33)
- #define EXTSB XO31(954)
- #define EXTSH XO31(922)
- #define EXTSW XO31(986)
- #define ADD XO31(266)
- #define ADDE XO31(138)
- #define ADDME XO31(234)
- #define ADDZE XO31(202)
- #define ADDC XO31( 10)
- #define AND XO31( 28)
- #define SUBF XO31( 40)
- #define SUBFC XO31( 8)
- #define SUBFE XO31(136)
- #define SUBFME XO31(232)
- #define SUBFZE XO31(200)
- #define OR XO31(444)
- #define XOR XO31(316)
- #define MULLW XO31(235)
- #define MULHW XO31( 75)
- #define MULHWU XO31( 11)
- #define DIVW XO31(491)
- #define DIVWU XO31(459)
- #define CMP XO31( 0)
- #define CMPL XO31( 32)
- #define LHBRX XO31(790)
- #define LWBRX XO31(534)
- #define LDBRX XO31(532)
- #define STHBRX XO31(918)
- #define STWBRX XO31(662)
- #define STDBRX XO31(660)
- #define MFSPR XO31(339)
- #define MTSPR XO31(467)
- #define SRAWI XO31(824)
- #define NEG XO31(104)
- #define MFCR XO31( 19)
- #define MFOCRF (MFCR | (1u << 20))
- #define NOR XO31(124)
- #define CNTLZW XO31( 26)
- #define CNTLZD XO31( 58)
- #define CNTTZW XO31(538)
- #define CNTTZD XO31(570)
- #define CNTPOPW XO31(378)
- #define CNTPOPD XO31(506)
- #define ANDC XO31( 60)
- #define ORC XO31(412)
- #define EQV XO31(284)
- #define NAND XO31(476)
- #define ISEL XO31( 15)
- #define MULLD XO31(233)
- #define MULHD XO31( 73)
- #define MULHDU XO31( 9)
- #define DIVD XO31(489)
- #define DIVDU XO31(457)
- #define LBZX XO31( 87)
- #define LHZX XO31(279)
- #define LHAX XO31(343)
- #define LWZX XO31( 23)
- #define STBX XO31(215)
- #define STHX XO31(407)
- #define STWX XO31(151)
- #define EIEIO XO31(854)
- #define HWSYNC XO31(598)
- #define LWSYNC (HWSYNC | (1u << 21))
- #define SPR(a, b) ((((a)<<5)|(b))<<11)
- #define LR SPR(8, 0)
- #define CTR SPR(9, 0)
- #define SLW XO31( 24)
- #define SRW XO31(536)
- #define SRAW XO31(792)
- #define SLD XO31( 27)
- #define SRD XO31(539)
- #define SRAD XO31(794)
- #define SRADI XO31(413<<1)
- #define TW XO31( 4)
- #define TRAP (TW | TO(31))
- #define NOP ORI /* ori 0,0,0 */
- #define LVX XO31(103)
- #define LVEBX XO31(7)
- #define LVEHX XO31(39)
- #define LVEWX XO31(71)
- #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
- #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
- #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
- #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
- #define LXSD (OPCD(57) | 2) /* v3.00 */
- #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
- #define STVX XO31(231)
- #define STVEWX XO31(199)
- #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
- #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
- #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
- #define STXSD (OPCD(61) | 2) /* v3.00 */
- #define VADDSBS VX4(768)
- #define VADDUBS VX4(512)
- #define VADDUBM VX4(0)
- #define VADDSHS VX4(832)
- #define VADDUHS VX4(576)
- #define VADDUHM VX4(64)
- #define VADDSWS VX4(896)
- #define VADDUWS VX4(640)
- #define VADDUWM VX4(128)
- #define VADDUDM VX4(192) /* v2.07 */
- #define VSUBSBS VX4(1792)
- #define VSUBUBS VX4(1536)
- #define VSUBUBM VX4(1024)
- #define VSUBSHS VX4(1856)
- #define VSUBUHS VX4(1600)
- #define VSUBUHM VX4(1088)
- #define VSUBSWS VX4(1920)
- #define VSUBUWS VX4(1664)
- #define VSUBUWM VX4(1152)
- #define VSUBUDM VX4(1216) /* v2.07 */
- #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
- #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
- #define VMAXSB VX4(258)
- #define VMAXSH VX4(322)
- #define VMAXSW VX4(386)
- #define VMAXSD VX4(450) /* v2.07 */
- #define VMAXUB VX4(2)
- #define VMAXUH VX4(66)
- #define VMAXUW VX4(130)
- #define VMAXUD VX4(194) /* v2.07 */
- #define VMINSB VX4(770)
- #define VMINSH VX4(834)
- #define VMINSW VX4(898)
- #define VMINSD VX4(962) /* v2.07 */
- #define VMINUB VX4(514)
- #define VMINUH VX4(578)
- #define VMINUW VX4(642)
- #define VMINUD VX4(706) /* v2.07 */
- #define VCMPEQUB VX4(6)
- #define VCMPEQUH VX4(70)
- #define VCMPEQUW VX4(134)
- #define VCMPEQUD VX4(199) /* v2.07 */
- #define VCMPGTSB VX4(774)
- #define VCMPGTSH VX4(838)
- #define VCMPGTSW VX4(902)
- #define VCMPGTSD VX4(967) /* v2.07 */
- #define VCMPGTUB VX4(518)
- #define VCMPGTUH VX4(582)
- #define VCMPGTUW VX4(646)
- #define VCMPGTUD VX4(711) /* v2.07 */
- #define VCMPNEB VX4(7) /* v3.00 */
- #define VCMPNEH VX4(71) /* v3.00 */
- #define VCMPNEW VX4(135) /* v3.00 */
- #define VSLB VX4(260)
- #define VSLH VX4(324)
- #define VSLW VX4(388)
- #define VSLD VX4(1476) /* v2.07 */
- #define VSRB VX4(516)
- #define VSRH VX4(580)
- #define VSRW VX4(644)
- #define VSRD VX4(1732) /* v2.07 */
- #define VSRAB VX4(772)
- #define VSRAH VX4(836)
- #define VSRAW VX4(900)
- #define VSRAD VX4(964) /* v2.07 */
- #define VRLB VX4(4)
- #define VRLH VX4(68)
- #define VRLW VX4(132)
- #define VRLD VX4(196) /* v2.07 */
- #define VMULEUB VX4(520)
- #define VMULEUH VX4(584)
- #define VMULEUW VX4(648) /* v2.07 */
- #define VMULOUB VX4(8)
- #define VMULOUH VX4(72)
- #define VMULOUW VX4(136) /* v2.07 */
- #define VMULUWM VX4(137) /* v2.07 */
- #define VMSUMUHM VX4(38)
- #define VMRGHB VX4(12)
- #define VMRGHH VX4(76)
- #define VMRGHW VX4(140)
- #define VMRGLB VX4(268)
- #define VMRGLH VX4(332)
- #define VMRGLW VX4(396)
- #define VPKUHUM VX4(14)
- #define VPKUWUM VX4(78)
- #define VAND VX4(1028)
- #define VANDC VX4(1092)
- #define VNOR VX4(1284)
- #define VOR VX4(1156)
- #define VXOR VX4(1220)
- #define VEQV VX4(1668) /* v2.07 */
- #define VNAND VX4(1412) /* v2.07 */
- #define VORC VX4(1348) /* v2.07 */
- #define VSPLTB VX4(524)
- #define VSPLTH VX4(588)
- #define VSPLTW VX4(652)
- #define VSPLTISB VX4(780)
- #define VSPLTISH VX4(844)
- #define VSPLTISW VX4(908)
- #define VSLDOI VX4(44)
- #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
- #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
- #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
- #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
- #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
- #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
- #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
- #define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
- #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
- #define RT(r) ((r)<<21)
- #define RS(r) ((r)<<21)
- #define RA(r) ((r)<<16)
- #define RB(r) ((r)<<11)
- #define TO(t) ((t)<<21)
- #define SH(s) ((s)<<11)
- #define MB(b) ((b)<<6)
- #define ME(e) ((e)<<1)
- #define BO(o) ((o)<<21)
- #define MB64(b) ((b)<<5)
- #define FXM(b) (1 << (19 - (b)))
- #define VRT(r) (((r) & 31) << 21)
- #define VRA(r) (((r) & 31) << 16)
- #define VRB(r) (((r) & 31) << 11)
- #define VRC(r) (((r) & 31) << 6)
- #define LK 1
- #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
- #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
- #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
- #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
- #define BF(n) ((n)<<23)
- #define BI(n, c) (((c)+((n)*4))<<16)
- #define BT(n, c) (((c)+((n)*4))<<21)
- #define BA(n, c) (((c)+((n)*4))<<16)
- #define BB(n, c) (((c)+((n)*4))<<11)
- #define BC_(n, c) (((c)+((n)*4))<<6)
- #define BO_COND_TRUE BO(12)
- #define BO_COND_FALSE BO( 4)
- #define BO_ALWAYS BO(20)
- enum {
- CR_LT,
- CR_GT,
- CR_EQ,
- CR_SO
- };
- static const uint32_t tcg_to_bc[] = {
- [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
- [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
- [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
- [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
- [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
- [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
- [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
- [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
- [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
- [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
- };
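- /* For example, tcg_to_bc[TCG_COND_LT] encodes "bc 12, 4*cr7+lt, <disp>":
-    BO(12) branches when the selected CR bit is set and BI(7, CR_LT)
-    selects CR7[LT]; the 14-bit displacement is filled in by tcg_out_bc(). */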
- /* The low bit here is set if the RA and RB fields must be inverted. */
- static const uint32_t tcg_to_isel[] = {
- [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
- [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
- [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
- [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
- [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
- [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
- [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
- [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
- [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
- [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
- };
- static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
- intptr_t value, intptr_t addend)
- {
- tcg_insn_unit *target;
- int16_t lo;
- int32_t hi;
- value += addend;
- target = (tcg_insn_unit *)value;
- switch (type) {
- case R_PPC_REL14:
- return reloc_pc14(code_ptr, target);
- case R_PPC_REL24:
- return reloc_pc24(code_ptr, target);
- case R_PPC_ADDR16:
- /*
- * We are (slightly) abusing this relocation type. In particular,
- * assert that the low 2 bits are zero, and do not modify them.
- * That way we can use this with LD et al that have opcode bits
- * in the low 2 bits of the insn.
- */
- if ((value & 3) || value != (int16_t)value) {
- return false;
- }
- *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
- break;
- case R_PPC_ADDR32:
- /*
- * We are abusing this relocation type. Again, this points to
- * a pair of insns, lis + load. This is an absolute address
- * relocation for PPC32 so the lis cannot be removed.
- */
- lo = value;
- hi = value - lo;
- if (hi + lo != value) {
- return false;
- }
- code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
- code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
- break;
- default:
- g_assert_not_reached();
- }
- return true;
- }
- static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
- TCGReg base, tcg_target_long offset);
- static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
- {
- if (ret == arg) {
- return true;
- }
- switch (type) {
- case TCG_TYPE_I64:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- /* fallthru */
- case TCG_TYPE_I32:
- if (ret < TCG_REG_V0) {
- if (arg < TCG_REG_V0) {
- tcg_out32(s, OR | SAB(arg, ret, arg));
- break;
- } else if (have_isa_2_07) {
- tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
- | VRT(arg) | RA(ret));
- break;
- } else {
- /* Altivec does not support vector->integer moves. */
- return false;
- }
- } else if (arg < TCG_REG_V0) {
- if (have_isa_2_07) {
- tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
- | VRT(ret) | RA(arg));
- break;
- } else {
- /* Altivec does not support integer->vector moves. */
- return false;
- }
- }
- /* fallthru */
- case TCG_TYPE_V64:
- case TCG_TYPE_V128:
- tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
- tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
- break;
- default:
- g_assert_not_reached();
- }
- return true;
- }
- static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
- int sh, int mb)
- {
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
- mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
- tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
- }
- static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
- int sh, int mb, int me)
- {
- tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
- }
- static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
- {
- tcg_out_rld(s, RLDICL, dst, src, 0, 32);
- }
- static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
- {
- tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
- }
- static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
- {
- tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
- }
- static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
- {
- tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
- }
- static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
- {
- tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
- }
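- /* Example: tcg_out_shri32(s, dst, src, 8) emits "rlwinm dst,src,24,8,31",
-    i.e. rotate left by 24 (= rotate right by 8) and keep bits 8..31,
-    which is a logical right shift by 8. */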
- /* Emit a move into ret of arg, if it can be done in one insn. */
- static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
- {
- if (arg == (int16_t)arg) {
- tcg_out32(s, ADDI | TAI(ret, 0, arg));
- return true;
- }
- if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
- tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
- return true;
- }
- return false;
- }
- static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
- tcg_target_long arg, bool in_prologue)
- {
- intptr_t tb_diff;
- tcg_target_long tmp;
- int shift;
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
- arg = (int32_t)arg;
- }
- /* Load 16-bit immediates with one insn. */
- if (tcg_out_movi_one(s, ret, arg)) {
- return;
- }
- /* Load addresses within the TB with one insn. */
- tb_diff = arg - (intptr_t)s->code_gen_ptr;
- if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
- tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
- return;
- }
- /* Load 32-bit immediates with two insns. Note that we've already
- eliminated bare ADDIS, so we know both insns are required. */
- if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
- tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
- tcg_out32(s, ORI | SAI(ret, ret, arg));
- return;
- }
- if (arg == (uint32_t)arg && !(arg & 0x8000)) {
- tcg_out32(s, ADDI | TAI(ret, 0, arg));
- tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
- return;
- }
- /* Load masked 16-bit value. */
- if (arg > 0 && (arg & 0x8000)) {
- tmp = arg | 0x7fff;
- if ((tmp & (tmp + 1)) == 0) {
- int mb = clz64(tmp + 1) + 1;
- tcg_out32(s, ADDI | TAI(ret, 0, arg));
- tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
- return;
- }
- }
- /* Load common masks with 2 insns. */
- shift = ctz64(arg);
- tmp = arg >> shift;
- if (tmp == (int16_t)tmp) {
- tcg_out32(s, ADDI | TAI(ret, 0, tmp));
- tcg_out_shli64(s, ret, ret, shift);
- return;
- }
- shift = clz64(arg);
- if (tcg_out_movi_one(s, ret, arg << shift)) {
- tcg_out_shri64(s, ret, ret, shift);
- return;
- }
- /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
- if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
- tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
- return;
- }
- /* Use the constant pool, if possible. */
- if (!in_prologue && USE_REG_TB) {
- new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
- -(intptr_t)s->code_gen_ptr);
- tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
- return;
- }
- tmp = arg >> 31 >> 1;
- tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
- if (tmp) {
- tcg_out_shli64(s, ret, ret, 32);
- }
- if (arg & 0xffff0000) {
- tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
- }
- if (arg & 0xffff) {
- tcg_out32(s, ORI | SAI(ret, ret, arg));
- }
- }
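- /* Worst case for an arbitrary 64-bit constant is therefore five insns:
-    build the high 32 bits with ADDIS+ORI, shift left by 32, then merge
-    the low 32 bits with ORIS+ORI. */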
- static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
- tcg_target_long val)
- {
- uint32_t load_insn;
- int rel, low;
- intptr_t add;
- low = (int8_t)val;
- if (low >= -16 && low < 16) {
- if (val == (tcg_target_long)dup_const(MO_8, low)) {
- tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
- return;
- }
- if (val == (tcg_target_long)dup_const(MO_16, low)) {
- tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
- return;
- }
- if (val == (tcg_target_long)dup_const(MO_32, low)) {
- tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
- return;
- }
- }
- if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) {
- tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
- return;
- }
- /*
- * Otherwise we must load the value from the constant pool.
- */
- if (USE_REG_TB) {
- rel = R_PPC_ADDR16;
- add = -(intptr_t)s->code_gen_ptr;
- } else {
- rel = R_PPC_ADDR32;
- add = 0;
- }
- if (have_vsx) {
- load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
- load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
- if (TCG_TARGET_REG_BITS == 64) {
- new_pool_label(s, val, rel, s->code_ptr, add);
- } else {
- new_pool_l2(s, rel, s->code_ptr, add, val, val);
- }
- } else {
- load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
- if (TCG_TARGET_REG_BITS == 64) {
- new_pool_l2(s, rel, s->code_ptr, add, val, val);
- } else {
- new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
- }
- }
- if (USE_REG_TB) {
- tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
- load_insn |= RA(TCG_REG_TB);
- } else {
- tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
- tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
- }
- tcg_out32(s, load_insn);
- }
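- /* The zero immediates in the ADDI (or ADDIS+ADDI) sequence above are
-    placeholders: new_pool_label/new_pool_l* record an R_PPC_ADDR16 or
-    R_PPC_ADDR32 relocation at this point, and patch_reloc() later fills
-    in the constant pool address (TB-relative when USE_REG_TB). */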
- static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
- tcg_target_long arg)
- {
- switch (type) {
- case TCG_TYPE_I32:
- case TCG_TYPE_I64:
- tcg_debug_assert(ret < TCG_REG_V0);
- tcg_out_movi_int(s, type, ret, arg, false);
- break;
- case TCG_TYPE_V64:
- case TCG_TYPE_V128:
- tcg_debug_assert(ret >= TCG_REG_V0);
- tcg_out_dupi_vec(s, type, ret, arg);
- break;
- default:
- g_assert_not_reached();
- }
- }
- static bool mask_operand(uint32_t c, int *mb, int *me)
- {
- uint32_t lsb, test;
- /* Accept a bit pattern like:
- 0....01....1
- 1....10....0
- 0..01..10..0
- Keep track of the transitions. */
- if (c == 0 || c == -1) {
- return false;
- }
- test = c;
- lsb = test & -test;
- test += lsb;
- if (test & (test - 1)) {
- return false;
- }
- *me = clz32(lsb);
- *mb = test ? clz32(test & -test) + 1 : 0;
- return true;
- }
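- /* Example: c = 0x00ff0000 gives lsb = 0x00010000, so *me = 15; adding
-    the lsb yields 0x01000000, a power of two, so *mb = 8. The resulting
-    rlwinm mask of bits 8..15 is exactly 0x00ff0000. */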
- static bool mask64_operand(uint64_t c, int *mb, int *me)
- {
- uint64_t lsb;
- if (c == 0) {
- return false;
- }
- lsb = c & -c;
- /* Accept 1..10..0. */
- if (c == -lsb) {
- *mb = 0;
- *me = clz64(lsb);
- return true;
- }
- /* Accept 0..01..1. */
- if (lsb == 1 && (c & (c + 1)) == 0) {
- *mb = clz64(c + 1) + 1;
- *me = 63;
- return true;
- }
- return false;
- }
- static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
- {
- int mb, me;
- if (mask_operand(c, &mb, &me)) {
- tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
- } else if ((c & 0xffff) == c) {
- tcg_out32(s, ANDI | SAI(src, dst, c));
- return;
- } else if ((c & 0xffff0000) == c) {
- tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
- return;
- } else {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
- tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
- }
- }
- static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
- {
- int mb, me;
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- if (mask64_operand(c, &mb, &me)) {
- if (mb == 0) {
- tcg_out_rld(s, RLDICR, dst, src, 0, me);
- } else {
- tcg_out_rld(s, RLDICL, dst, src, 0, mb);
- }
- } else if ((c & 0xffff) == c) {
- tcg_out32(s, ANDI | SAI(src, dst, c));
- return;
- } else if ((c & 0xffff0000) == c) {
- tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
- return;
- } else {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
- tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
- }
- }
- static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
- int op_lo, int op_hi)
- {
- if (c >> 16) {
- tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
- src = dst;
- }
- if (c & 0xffff) {
- tcg_out32(s, op_lo | SAI(src, dst, c));
- src = dst;
- }
- }
- static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
- {
- tcg_out_zori32(s, dst, src, c, ORI, ORIS);
- }
- static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
- {
- tcg_out_zori32(s, dst, src, c, XORI, XORIS);
- }
- static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
- {
- ptrdiff_t disp = tcg_pcrel_diff(s, target);
- if (in_range_b(disp)) {
- tcg_out32(s, B | (disp & 0x3fffffc) | mask);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
- tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS | mask);
- }
- }
- static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
- TCGReg base, tcg_target_long offset)
- {
- tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
- bool is_int_store = false;
- TCGReg rs = TCG_REG_TMP1;
- switch (opi) {
- case LD: case LWA:
- align = 3;
- /* FALLTHRU */
- default:
- if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
- rs = rt;
- break;
- }
- break;
- case LXSD:
- case STXSD:
- align = 3;
- break;
- case LXV:
- case STXV:
- align = 15;
- break;
- case STD:
- align = 3;
- /* FALLTHRU */
- case STB: case STH: case STW:
- is_int_store = true;
- break;
- }
- /* For unaligned, or very large offsets, use the indexed form. */
- if (offset & align || offset != (int32_t)offset || opi == 0) {
- if (rs == base) {
- rs = TCG_REG_R0;
- }
- tcg_debug_assert(!is_int_store || rs != rt);
- tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
- tcg_out32(s, opx | TAB(rt & 31, base, rs));
- return;
- }
- l0 = (int16_t)offset;
- offset = (offset - l0) >> 16;
- l1 = (int16_t)offset;
- if (l1 < 0 && orig >= 0) {
- extra = 0x4000;
- l1 = (int16_t)(offset - 0x4000);
- }
- if (l1) {
- tcg_out32(s, ADDIS | TAI(rs, base, l1));
- base = rs;
- }
- if (extra) {
- tcg_out32(s, ADDIS | TAI(rs, base, extra));
- base = rs;
- }
- if (opi != ADDI || base != rt || l0 != 0) {
- tcg_out32(s, opi | TAI(rt & 31, base, l0));
- }
- }
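- /* Example: a 32-bit offset 0x12345678 splits into l0 = 0x5678 and
-    l1 = 0x1234, producing "addis rs,base,0x1234" followed by the
-    displacement form with 0x5678; the 0x4000 "extra" step compensates
-    when the low half sign-extends negative for a positive offset. */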
- static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
- TCGReg va, TCGReg vb, int shb)
- {
- tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
- }
- static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg base, intptr_t offset)
- {
- int shift;
- switch (type) {
- case TCG_TYPE_I32:
- if (ret < TCG_REG_V0) {
- tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
- break;
- }
- if (have_isa_2_07 && have_vsx) {
- tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
- break;
- }
- tcg_debug_assert((offset & 3) == 0);
- tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
- shift = (offset - 4) & 0xc;
- if (shift) {
- tcg_out_vsldoi(s, ret, ret, ret, shift);
- }
- break;
- case TCG_TYPE_I64:
- if (ret < TCG_REG_V0) {
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_mem_long(s, LD, LDX, ret, base, offset);
- break;
- }
- /* fallthru */
- case TCG_TYPE_V64:
- tcg_debug_assert(ret >= TCG_REG_V0);
- if (have_vsx) {
- tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
- ret, base, offset);
- break;
- }
- tcg_debug_assert((offset & 7) == 0);
- tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
- if (offset & 8) {
- tcg_out_vsldoi(s, ret, ret, ret, 8);
- }
- break;
- case TCG_TYPE_V128:
- tcg_debug_assert(ret >= TCG_REG_V0);
- tcg_debug_assert((offset & 15) == 0);
- tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
- LVX, ret, base, offset);
- break;
- default:
- g_assert_not_reached();
- }
- }
- static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg base, intptr_t offset)
- {
- int shift;
- switch (type) {
- case TCG_TYPE_I32:
- if (arg < TCG_REG_V0) {
- tcg_out_mem_long(s, STW, STWX, arg, base, offset);
- break;
- }
- if (have_isa_2_07 && have_vsx) {
- tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
- break;
- }
- tcg_debug_assert((offset & 3) == 0);
- shift = (offset - 4) & 0xc;
- if (shift) {
- tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
- arg = TCG_VEC_TMP1;
- }
- tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
- break;
- case TCG_TYPE_I64:
- if (arg < TCG_REG_V0) {
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_mem_long(s, STD, STDX, arg, base, offset);
- break;
- }
- /* fallthru */
- case TCG_TYPE_V64:
- tcg_debug_assert(arg >= TCG_REG_V0);
- if (have_vsx) {
- tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
- STXSDX, arg, base, offset);
- break;
- }
- tcg_debug_assert((offset & 7) == 0);
- if (offset & 8) {
- tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
- arg = TCG_VEC_TMP1;
- }
- tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
- tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
- break;
- case TCG_TYPE_V128:
- tcg_debug_assert(arg >= TCG_REG_V0);
- tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
- STVX, arg, base, offset);
- break;
- default:
- g_assert_not_reached();
- }
- }
- static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
- TCGReg base, intptr_t ofs)
- {
- return false;
- }
- static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
- int const_arg2, int cr, TCGType type)
- {
- int imm;
- uint32_t op;
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
- /* Simplify the comparisons below wrt CMPI. */
- if (type == TCG_TYPE_I32) {
- arg2 = (int32_t)arg2;
- }
- switch (cond) {
- case TCG_COND_EQ:
- case TCG_COND_NE:
- if (const_arg2) {
- if ((int16_t) arg2 == arg2) {
- op = CMPI;
- imm = 1;
- break;
- } else if ((uint16_t) arg2 == arg2) {
- op = CMPLI;
- imm = 1;
- break;
- }
- }
- op = CMPL;
- imm = 0;
- break;
- case TCG_COND_LT:
- case TCG_COND_GE:
- case TCG_COND_LE:
- case TCG_COND_GT:
- if (const_arg2) {
- if ((int16_t) arg2 == arg2) {
- op = CMPI;
- imm = 1;
- break;
- }
- }
- op = CMP;
- imm = 0;
- break;
- case TCG_COND_LTU:
- case TCG_COND_GEU:
- case TCG_COND_LEU:
- case TCG_COND_GTU:
- if (const_arg2) {
- if ((uint16_t) arg2 == arg2) {
- op = CMPLI;
- imm = 1;
- break;
- }
- }
- op = CMPL;
- imm = 0;
- break;
- default:
- tcg_abort();
- }
- op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
- if (imm) {
- tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
- } else {
- if (const_arg2) {
- tcg_out_movi(s, type, TCG_REG_R0, arg2);
- arg2 = TCG_REG_R0;
- }
- tcg_out32(s, op | RA(arg1) | RB(arg2));
- }
- }
- static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
- TCGReg dst, TCGReg src)
- {
- if (type == TCG_TYPE_I32) {
- tcg_out32(s, CNTLZW | RS(src) | RA(dst));
- tcg_out_shri32(s, dst, dst, 5);
- } else {
- tcg_out32(s, CNTLZD | RS(src) | RA(dst));
- tcg_out_shri64(s, dst, dst, 6);
- }
- }
- static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
- {
- /* X != 0 implies X + -1 generates a carry. The SUBFE then computes
-    R = ~(X-1) + X + CA = -X + X + CA = CA, i.e. 1 iff X != 0. */
- if (dst != src) {
- tcg_out32(s, ADDIC | TAI(dst, src, -1));
- tcg_out32(s, SUBFE | TAB(dst, dst, src));
- } else {
- tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
- tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
- }
- }
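- /* Example: for src = 5, ADDIC produces 4 with CA = 1, and SUBFE then
-    computes ~4 + 5 + 1 = 1; for src = 0, ADDIC produces -1 with CA = 0,
-    and SUBFE computes ~(-1) + 0 + 0 = 0. */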
- static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
- bool const_arg2)
- {
- if (const_arg2) {
- if ((uint32_t)arg2 == arg2) {
- tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
- } else {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
- tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
- }
- } else {
- tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
- }
- return TCG_REG_R0;
- }
- static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
- TCGArg arg0, TCGArg arg1, TCGArg arg2,
- int const_arg2)
- {
- int crop, sh;
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
- /* Ignore high bits of a potential constant arg2. */
- if (type == TCG_TYPE_I32) {
- arg2 = (uint32_t)arg2;
- }
- /* Handle common and trivial cases before handling anything else. */
- if (arg2 == 0) {
- switch (cond) {
- case TCG_COND_EQ:
- tcg_out_setcond_eq0(s, type, arg0, arg1);
- return;
- case TCG_COND_NE:
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_REG_R0, arg1);
- arg1 = TCG_REG_R0;
- }
- tcg_out_setcond_ne0(s, arg0, arg1);
- return;
- case TCG_COND_GE:
- tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
- arg1 = arg0;
- /* FALLTHRU */
- case TCG_COND_LT:
- /* Extract the sign bit. */
- if (type == TCG_TYPE_I32) {
- tcg_out_shri32(s, arg0, arg1, 31);
- } else {
- tcg_out_shri64(s, arg0, arg1, 63);
- }
- return;
- default:
- break;
- }
- }
- /* If we have ISEL, we can implement everything with 3 or 4 insns.
- All other cases below are also at least 3 insns, so speed up the
- code generator by not considering them and always using ISEL. */
- if (have_isel) {
- int isel, tab;
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
- isel = tcg_to_isel[cond];
- tcg_out_movi(s, type, arg0, 1);
- if (isel & 1) {
- /* arg0 = (bc ? 0 : 1) */
- tab = TAB(arg0, 0, arg0);
- isel &= ~1;
- } else {
- /* arg0 = (bc ? 1 : 0) */
- tcg_out_movi(s, type, TCG_REG_R0, 0);
- tab = TAB(arg0, arg0, TCG_REG_R0);
- }
- tcg_out32(s, isel | tab);
- return;
- }
- switch (cond) {
- case TCG_COND_EQ:
- arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
- tcg_out_setcond_eq0(s, type, arg0, arg1);
- return;
- case TCG_COND_NE:
- arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
- /* Discard the high bits only once, rather than both inputs. */
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_REG_R0, arg1);
- arg1 = TCG_REG_R0;
- }
- tcg_out_setcond_ne0(s, arg0, arg1);
- return;
- case TCG_COND_GT:
- case TCG_COND_GTU:
- sh = 30;
- crop = 0;
- goto crtest;
- case TCG_COND_LT:
- case TCG_COND_LTU:
- sh = 29;
- crop = 0;
- goto crtest;
- case TCG_COND_GE:
- case TCG_COND_GEU:
- sh = 31;
- crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
- goto crtest;
- case TCG_COND_LE:
- case TCG_COND_LEU:
- sh = 31;
- crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
- crtest:
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
- if (crop) {
- tcg_out32(s, crop);
- }
- tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
- tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
- break;
- default:
- tcg_abort();
- }
- }
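- /* In the crtest path, MFOCRF with FXM(7) places CR field 7 in bits
-    28..31 of R0 (LT=28, GT=29, EQ=30, SO=31); the rlwinm rotate count
-    (29, 30 or 31) moves the desired bit into bit position 31, which the
-    31,31 mask then isolates as the 0/1 result. */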
- static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
- {
- if (l->has_value) {
- bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr);
- } else {
- tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
- }
- tcg_out32(s, bc);
- }
- static void tcg_out_brcond(TCGContext *s, TCGCond cond,
- TCGArg arg1, TCGArg arg2, int const_arg2,
- TCGLabel *l, TCGType type)
- {
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
- tcg_out_bc(s, tcg_to_bc[cond], l);
- }
- static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
- TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
- TCGArg v2, bool const_c2)
- {
- /* If for some reason both inputs are zero, don't produce bad code. */
- if (v1 == 0 && v2 == 0) {
- tcg_out_movi(s, type, dest, 0);
- return;
- }
- tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
- if (have_isel) {
- int isel = tcg_to_isel[cond];
- /* Swap the V operands if the operation indicates inversion. */
- if (isel & 1) {
- int t = v1;
- v1 = v2;
- v2 = t;
- isel &= ~1;
- }
- /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
- if (v2 == 0) {
- tcg_out_movi(s, type, TCG_REG_R0, 0);
- }
- tcg_out32(s, isel | TAB(dest, v1, v2));
- } else {
- if (dest == v2) {
- cond = tcg_invert_cond(cond);
- v2 = v1;
- } else if (dest != v1) {
- if (v1 == 0) {
- tcg_out_movi(s, type, dest, 0);
- } else {
- tcg_out_mov(s, type, dest, v1);
- }
- }
- /* Branch forward over one insn */
- tcg_out32(s, tcg_to_bc[cond] | 8);
- if (v2 == 0) {
- tcg_out_movi(s, type, dest, 0);
- } else {
- tcg_out_mov(s, type, dest, v2);
- }
- }
- }
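- /* "tcg_to_bc[cond] | 8" above is a conditional branch with a byte
-    displacement of +8, i.e. it skips exactly the one 4-byte insn that
-    follows, selecting between the two moves without needing a label. */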
- static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
- TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
- {
- if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
- tcg_out32(s, opc | RA(a0) | RS(a1));
- } else {
- tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
- /* Note that the only other valid constant for a2 is 0. */
- if (have_isel) {
- tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
- tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
- } else if (!const_a2 && a0 == a2) {
- tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
- tcg_out32(s, opc | RA(a0) | RS(a1));
- } else {
- tcg_out32(s, opc | RA(a0) | RS(a1));
- tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
- if (const_a2) {
- tcg_out_movi(s, type, a0, 0);
- } else {
- tcg_out_mov(s, type, a0, a2);
- }
- }
- }
- }
- static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
- const int *const_args)
- {
- static const struct { uint8_t bit1, bit2; } bits[] = {
- [TCG_COND_LT ] = { CR_LT, CR_LT },
- [TCG_COND_LE ] = { CR_LT, CR_GT },
- [TCG_COND_GT ] = { CR_GT, CR_GT },
- [TCG_COND_GE ] = { CR_GT, CR_LT },
- [TCG_COND_LTU] = { CR_LT, CR_LT },
- [TCG_COND_LEU] = { CR_LT, CR_GT },
- [TCG_COND_GTU] = { CR_GT, CR_GT },
- [TCG_COND_GEU] = { CR_GT, CR_LT },
- };
- TCGCond cond = args[4], cond2;
- TCGArg al, ah, bl, bh;
- int blconst, bhconst;
- int op, bit1, bit2;
- al = args[0];
- ah = args[1];
- bl = args[2];
- bh = args[3];
- blconst = const_args[2];
- bhconst = const_args[3];
- switch (cond) {
- case TCG_COND_EQ:
- op = CRAND;
- goto do_equality;
- case TCG_COND_NE:
- op = CRNAND;
- do_equality:
- tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
- tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
- tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
- break;
- case TCG_COND_LT:
- case TCG_COND_LE:
- case TCG_COND_GT:
- case TCG_COND_GE:
- case TCG_COND_LTU:
- case TCG_COND_LEU:
- case TCG_COND_GTU:
- case TCG_COND_GEU:
- bit1 = bits[cond].bit1;
- bit2 = bits[cond].bit2;
- op = (bit1 != bit2 ? CRANDC : CRAND);
- cond2 = tcg_unsigned_cond(cond);
- tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
- tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
- tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
- tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
- break;
- default:
- tcg_abort();
- }
- }
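- /* For the ordered cases, the result accumulates into CR7[EQ]: the
-    low-part comparison (always unsigned) counts only when the high
-    parts are equal, and the high-part comparison decides otherwise. */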
- static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
- const int *const_args)
- {
- tcg_out_cmp2(s, args + 1, const_args + 1);
- tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
- tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
- }
- static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
- const int *const_args)
- {
- tcg_out_cmp2(s, args, const_args);
- tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
- }
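- /* Emit a memory barrier: a full hwsync by default, weakened to lwsync
- when only load-before-load ordering is required and to eieio for
- store-before-store. */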
- static void tcg_out_mb(TCGContext *s, TCGArg a0)
- {
- uint32_t insn = HWSYNC;
- a0 &= TCG_MO_ALL;
- if (a0 == TCG_MO_LD_LD) {
- insn = LWSYNC;
- } else if (a0 == TCG_MO_ST_ST) {
- insn = EIEIO;
- }
- tcg_out32(s, insn);
- }
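- /* Patch the goto_tb jump at jmp_addr to branch to addr.  On ppc64 the
- addis+addi (or addi+b) pair is rewritten with a single atomic 64-bit
- store; on ppc32 a lone branch insn is rewritten. */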
- void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
- uintptr_t addr)
- {
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_insn_unit i1, i2;
- intptr_t tb_diff = addr - tc_ptr;
- intptr_t br_diff = addr - (jmp_addr + 4);
- uint64_t pair;
- /* Checking that tb_diff fits in 16 bits does not exercise the range of
- the branch, but it does mean the new value of TCG_REG_TB can be loaded
- with a single ADDI, leaving room for the branch; this case happens
- quite often. */
- if (tb_diff == (int16_t)tb_diff) {
- i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
- i2 = B | (br_diff & 0x3fffffc);
- } else {
- intptr_t lo = (int16_t)tb_diff;
- intptr_t hi = (int32_t)(tb_diff - lo);
- assert(tb_diff == hi + lo);
- i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
- i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
- }
- #ifdef HOST_WORDS_BIGENDIAN
- pair = (uint64_t)i1 << 32 | i2;
- #else
- pair = (uint64_t)i2 << 32 | i1;
- #endif
- /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
- within atomic_set that would fail to build on a ppc32 host. */
- atomic_set__nocheck((uint64_t *)jmp_addr, pair);
- flush_icache_range(jmp_addr, jmp_addr + 8);
- } else {
- intptr_t diff = addr - jmp_addr;
- tcg_debug_assert(in_range_b(diff));
- atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
- flush_icache_range(jmp_addr, jmp_addr + 4);
- }
- }
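- /* Emit a call, honouring the host ABI: with AIX-style function
- descriptors (_CALL_AIX) we look through the descriptor and set up the
- callee's TOC; ELFv2 requires r12 to hold the entry address; other ABIs
- can simply branch-and-link. */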
- static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
- {
- #ifdef _CALL_AIX
- /* Look through the function descriptor. If the branch is in range and
- the TOC value fits in 32 bits, load the TOC and branch directly. */
- void *tgt = ((void **)target)[0];
- uintptr_t toc = ((uintptr_t *)target)[1];
- intptr_t diff = tcg_pcrel_diff(s, tgt);
- if (in_range_b(diff) && toc == (uint32_t)toc) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
- tcg_out_b(s, LK, tgt);
- } else {
- /* Fold the low bits of the constant into the addresses below. */
- intptr_t arg = (intptr_t)target;
- int ofs = (int16_t)arg;
- if (ofs + 8 < 0x8000) {
- arg -= ofs;
- } else {
- ofs = 0;
- }
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
- tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
- }
- #elif defined(_CALL_ELF) && _CALL_ELF == 2
- intptr_t diff;
- /* In the ELFv2 ABI, we have to set up r12 to contain the destination
- address, which the callee uses to compute its TOC address. */
- /* FIXME: when the branch is in range, we could avoid r12 load if we
- knew that the destination uses the same TOC, and what its local
- entry point offset is. */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
- diff = tcg_pcrel_diff(s, target);
- if (in_range_b(diff)) {
- tcg_out_b(s, LK, target);
- } else {
- tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
- }
- #else
- tcg_out_b(s, LK, target);
- #endif
- }
- static const uint32_t qemu_ldx_opc[16] = {
- [MO_UB] = LBZX,
- [MO_UW] = LHZX,
- [MO_UL] = LWZX,
- [MO_Q] = LDX,
- [MO_SW] = LHAX,
- [MO_SL] = LWAX,
- [MO_BSWAP | MO_UB] = LBZX,
- [MO_BSWAP | MO_UW] = LHBRX,
- [MO_BSWAP | MO_UL] = LWBRX,
- [MO_BSWAP | MO_Q] = LDBRX,
- };
- static const uint32_t qemu_stx_opc[16] = {
- [MO_UB] = STBX,
- [MO_UW] = STHX,
- [MO_UL] = STWX,
- [MO_Q] = STDX,
- [MO_BSWAP | MO_UB] = STBX,
- [MO_BSWAP | MO_UW] = STHBRX,
- [MO_BSWAP | MO_UL] = STWBRX,
- [MO_BSWAP | MO_Q] = STDBRX,
- };
- static const uint32_t qemu_exts_opc[4] = {
- EXTSB, EXTSH, EXTSW, 0
- };
- #if defined (CONFIG_SOFTMMU)
- #include "../tcg-ldst.c.inc"
- /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- * TCGMemOpIdx oi, uintptr_t ra)
- */
- static void * const qemu_ld_helpers[16] = {
- [MO_UB] = helper_ret_ldub_mmu,
- [MO_LEUW] = helper_le_lduw_mmu,
- [MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
- [MO_BEUW] = helper_be_lduw_mmu,
- [MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
- };
- /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- * uintxx_t val, TCGMemOpIdx oi, uintptr_t ra)
- */
- static void * const qemu_st_helpers[16] = {
- [MO_UB] = helper_ret_stb_mmu,
- [MO_LEUW] = helper_le_stw_mmu,
- [MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
- [MO_BEUW] = helper_be_stw_mmu,
- [MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
- };
- /* We expect to use a 16-bit negative offset from ENV. */
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
- /* Perform the TLB load and compare. Places the result of the comparison
- in CR7, loads the addend of the TLB into R3, and returns the register
- containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
- static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
- TCGReg addrlo, TCGReg addrhi,
- int mem_index, bool is_read)
- {
- int cmp_off
- = (is_read
- ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write));
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
- unsigned s_bits = opc & MO_SIZE;
- unsigned a_bits = get_alignment_bits(opc);
- /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
- /* Extract the page index, shifted into place for tlb index. */
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- } else {
- tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- }
- tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
- /* Load the TLB comparator. */
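- /* When the comparator is at offset 0, a load-with-update (lwzux/ldux)
- forms the entry address in R3 and fetches the comparator in one insn. */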
- if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
- uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
- ? LWZUX : LDUX);
- tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
- } else {
- tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
- } else {
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
- }
- }
- /* Load the TLB addend for use on the fast path. Do this asap
- to minimize any load use delay. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
- offsetof(CPUTLBEntry, addend));
- /* Clear the non-page, non-alignment bits from the address */
- if (TCG_TARGET_REG_BITS == 32) {
- /* We don't support unaligned accesses on 32-bits.
- * Preserve the bottom bits and thus trigger a comparison
- * failure on unaligned accesses.
- */
- if (a_bits < s_bits) {
- a_bits = s_bits;
- }
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
- (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
- } else {
- TCGReg t = addrlo;
- /* If the access is unaligned, we need to make sure we fail if we
- * cross a page boundary. The trick is to add the access size-1
- * to the address before masking the low bits. That will make the
- * address overflow to the next page if we cross a page boundary,
- * which will then force a mismatch of the TLB compare.
- */
- if (a_bits < s_bits) {
- unsigned a_mask = (1 << a_bits) - 1;
- unsigned s_mask = (1 << s_bits) - 1;
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
- t = TCG_REG_R0;
- }
- /* Mask the address for the requested alignment. */
- if (TARGET_LONG_BITS == 32) {
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
- (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
- /* Zero-extend the address for use in the final address. */
- tcg_out_ext32u(s, TCG_REG_R4, addrlo);
- addrlo = TCG_REG_R4;
- } else if (a_bits == 0) {
- tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
- } else {
- tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
- 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
- tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
- }
- }
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
- 0, 7, TCG_TYPE_I32);
- tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
- tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
- } else {
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
- 0, 7, TCG_TYPE_TL);
- }
- return addrlo;
- }
- /* Record the context of a call to the out of line helper code for the slow
- path for a load or store, so that we can later generate the correct
- helper code. */
- static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
- TCGReg datalo_reg, TCGReg datahi_reg,
- TCGReg addrlo_reg, TCGReg addrhi_reg,
- tcg_insn_unit *raddr, tcg_insn_unit *lptr)
- {
- TCGLabelQemuLdst *label = new_ldst_label(s);
- label->is_ld = is_ld;
- label->oi = oi;
- label->datalo_reg = datalo_reg;
- label->datahi_reg = datahi_reg;
- label->addrlo_reg = addrlo_reg;
- label->addrhi_reg = addrhi_reg;
- label->raddr = raddr;
- label->label_ptr[0] = lptr;
- }
- static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
- {
- TCGMemOpIdx oi = lb->oi;
- MemOp opc = get_memop(oi);
- TCGReg hi, lo, arg = TCG_REG_R3;
- if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
- return false;
- }
- tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
- lo = lb->addrlo_reg;
- hi = lb->addrhi_reg;
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- #ifdef TCG_TARGET_CALL_ALIGN_ARGS
- arg |= 1;
- #endif
- tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
- tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
- } else {
- /* If the address needed to be zero-extended, we'll have already
- placed it in R4. The only remaining case is 64-bit guest. */
- tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
- }
- tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
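- /* The conditional branch-and-link that reached this slow path left a
- return address within the TB in LR; pass it as the ra argument. */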
- tcg_out32(s, MFSPR | RT(arg) | LR);
- tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
- lo = lb->datalo_reg;
- hi = lb->datahi_reg;
- if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
- tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
- tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
- } else if (opc & MO_SIGN) {
- uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
- tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
- } else {
- tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
- }
- tcg_out_b(s, 0, lb->raddr);
- return true;
- }
- static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
- {
- TCGMemOpIdx oi = lb->oi;
- MemOp opc = get_memop(oi);
- MemOp s_bits = opc & MO_SIZE;
- TCGReg hi, lo, arg = TCG_REG_R3;
- if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
- return false;
- }
- tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
- lo = lb->addrlo_reg;
- hi = lb->addrhi_reg;
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- #ifdef TCG_TARGET_CALL_ALIGN_ARGS
- arg |= 1;
- #endif
- tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
- tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
- } else {
- /* If the address needed to be zero-extended, we'll have already
- placed it in R4. The only remaining case is 64-bit guest. */
- tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
- }
- lo = lb->datalo_reg;
- hi = lb->datahi_reg;
- if (TCG_TARGET_REG_BITS == 32) {
- switch (s_bits) {
- case MO_64:
- #ifdef TCG_TARGET_CALL_ALIGN_ARGS
- arg |= 1;
- #endif
- tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
- /* FALLTHRU */
- case MO_32:
- tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
- break;
- default:
- tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
- break;
- }
- } else {
- if (s_bits == MO_64) {
- tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
- } else {
- tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
- }
- }
- tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
- tcg_out32(s, MFSPR | RT(arg) | LR);
- tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
- tcg_out_b(s, 0, lb->raddr);
- return true;
- }
- #endif /* SOFTMMU */
- static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
- {
- TCGReg datalo, datahi, addrlo, rbase;
- TCGReg addrhi __attribute__((unused));
- TCGMemOpIdx oi;
- MemOp opc, s_bits;
- #ifdef CONFIG_SOFTMMU
- int mem_index;
- tcg_insn_unit *label_ptr;
- #endif
- datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
- s_bits = opc & MO_SIZE;
- #ifdef CONFIG_SOFTMMU
- mem_index = get_mmuidx(oi);
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
- /* Record the current insn and emit a conditional branch-and-link to the
- slow path, to be patched when the slow path is generated. */
- label_ptr = s->code_ptr;
- tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
- rbase = TCG_REG_R3;
- #else /* !CONFIG_SOFTMMU */
- rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
- tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
- addrlo = TCG_REG_TMP1;
- }
- #endif
- if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
- if (opc & MO_BSWAP) {
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
- tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
- tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
- } else if (rbase != 0) {
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
- tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
- tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
- } else if (addrlo == datahi) {
- tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
- tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
- } else {
- tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
- tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
- }
- } else {
- uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
- if (!have_isa_2_06 && insn == LDBRX) {
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
- tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
- tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
- tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
- } else if (insn) {
- tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
- } else {
- insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
- tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
- insn = qemu_exts_opc[s_bits];
- tcg_out32(s, insn | RA(datalo) | RS(datalo));
- }
- }
- #ifdef CONFIG_SOFTMMU
- add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
- #endif
- }
- static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
- {
- TCGReg datalo, datahi, addrlo, rbase;
- TCGReg addrhi __attribute__((unused));
- TCGMemOpIdx oi;
- MemOp opc, s_bits;
- #ifdef CONFIG_SOFTMMU
- int mem_index;
- tcg_insn_unit *label_ptr;
- #endif
- datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
- s_bits = opc & MO_SIZE;
- #ifdef CONFIG_SOFTMMU
- mem_index = get_mmuidx(oi);
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
- /* Record the current insn and emit a conditional branch-and-link to the
- slow path, to be patched when the slow path is generated. */
- label_ptr = s->code_ptr;
- tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
- rbase = TCG_REG_R3;
- #else /* !CONFIG_SOFTMMU */
- rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
- tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
- addrlo = TCG_REG_TMP1;
- }
- #endif
- if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
- if (opc & MO_BSWAP) {
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
- tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
- tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
- } else if (rbase != 0) {
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
- tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
- tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
- } else {
- tcg_out32(s, STW | TAI(datahi, addrlo, 0));
- tcg_out32(s, STW | TAI(datalo, addrlo, 4));
- }
- } else {
- uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
- if (!have_isa_2_06 && insn == STDBRX) {
- tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
- tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
- tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
- tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
- } else {
- tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
- }
- }
- #ifdef CONFIG_SOFTMMU
- add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
- #endif
- }
- static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
- {
- int i;
- for (i = 0; i < count; ++i) {
- p[i] = NOP;
- }
- }
- /* Parameters for function call generation, used in tcg.c. */
- #define TCG_TARGET_STACK_ALIGN 16
- #define TCG_TARGET_EXTEND_ARGS 1
- #ifdef _CALL_AIX
- # define LINK_AREA_SIZE (6 * SZR)
- # define LR_OFFSET (1 * SZR)
- # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
- #elif defined(TCG_TARGET_CALL_DARWIN)
- # define LINK_AREA_SIZE (6 * SZR)
- # define LR_OFFSET (2 * SZR)
- #elif TCG_TARGET_REG_BITS == 64
- # if defined(_CALL_ELF) && _CALL_ELF == 2
- # define LINK_AREA_SIZE (4 * SZR)
- # define LR_OFFSET (1 * SZR)
- # endif
- #else /* TCG_TARGET_REG_BITS == 32 */
- # if defined(_CALL_SYSV)
- # define LINK_AREA_SIZE (2 * SZR)
- # define LR_OFFSET (1 * SZR)
- # endif
- #endif
- #ifndef LR_OFFSET
- # error "Unhandled abi"
- #endif
- #ifndef TCG_TARGET_CALL_STACK_OFFSET
- # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
- #endif
- #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
- #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
- #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
- + TCG_STATIC_CALL_ARGS_SIZE \
- + CPU_TEMP_BUF_SIZE \
- + REG_SAVE_SIZE \
- + TCG_TARGET_STACK_ALIGN - 1) \
- & -TCG_TARGET_STACK_ALIGN)
- #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
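- /* Frame layout, from the stack pointer upward: the ABI linkage area,
- outgoing call arguments, the CPU temp buffer, and the callee-saved
- register area at the top (REG_SAVE_BOT .. FRAME_SIZE). */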
- static void tcg_target_qemu_prologue(TCGContext *s)
- {
- int i;
- #ifdef _CALL_AIX
- void **desc = (void **)s->code_ptr;
- desc[0] = desc + 2; /* entry point */
- desc[1] = 0; /* environment pointer */
- s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
- #endif
- tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
- CPU_TEMP_BUF_SIZE);
- /* Prologue */
- tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
- tcg_out32(s, (SZR == 8 ? STDU : STWU)
- | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
- for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
- tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
- TCG_REG_R1, REG_SAVE_BOT + i * SZR);
- }
- tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
- #ifndef CONFIG_SOFTMMU
- if (guest_base) {
- tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
- tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
- }
- #endif
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
- if (USE_REG_TB) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
- }
- tcg_out32(s, BCCTR | BO_ALWAYS);
- /* Epilogue */
- s->code_gen_epilogue = tb_ret_addr = s->code_ptr;
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
- for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
- tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
- TCG_REG_R1, REG_SAVE_BOT + i * SZR);
- }
- tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
- tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
- tcg_out32(s, BCLR | BO_ALWAYS);
- }
- static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
- const int *const_args)
- {
- TCGArg a0, a1, a2;
- int c;
- switch (opc) {
- case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
- tcg_out_b(s, 0, tb_ret_addr);
- break;
- case INDEX_op_goto_tb:
- if (s->tb_jmp_insn_offset) {
- /* Direct jump. */
- if (TCG_TARGET_REG_BITS == 64) {
- /* Ensure the next insns are 8-byte aligned. */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out32(s, NOP);
- }
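- /* Record the offset and emit a patchable addis+addi pair;
- tb_target_set_jmp_target rewrites both words with one atomic
- 64-bit store, hence the 8-byte alignment enforced above. */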
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- } else {
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
- tcg_out32(s, B);
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
- break;
- }
- } else {
- /* Indirect jump. */
- tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
- (intptr_t)(s->tb_jmp_target_addr + args[0]));
- }
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS);
- set_jmp_reset_offset(s, args[0]);
- if (USE_REG_TB) {
- /* For the unlinked case, need to reset TCG_REG_TB. */
- c = -tcg_current_code_size(s);
- assert(c == (int16_t)c);
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
- }
- break;
- case INDEX_op_goto_ptr:
- tcg_out32(s, MTSPR | RS(args[0]) | CTR);
- if (USE_REG_TB) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
- }
- tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
- tcg_out32(s, BCCTR | BO_ALWAYS);
- break;
- case INDEX_op_br:
- {
- TCGLabel *l = arg_label(args[0]);
- uint32_t insn = B;
- if (l->has_value) {
- insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr);
- } else {
- tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
- }
- tcg_out32(s, insn);
- }
- break;
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
- tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld32s_i64:
- tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld_i64:
- tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i64:
- tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
- break;
- case INDEX_op_add_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_32:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_32;
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
- case INDEX_op_and_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi32(s, a0, a1, a2);
- } else {
- tcg_out32(s, AND | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_and_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi64(s, a0, a1, a2);
- } else {
- tcg_out32(s, AND | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_or_i64:
- case INDEX_op_or_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_ori32(s, a0, a1, a2);
- } else {
- tcg_out32(s, OR | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_xor_i64:
- case INDEX_op_xor_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_xori32(s, a0, a1, a2);
- } else {
- tcg_out32(s, XOR | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_andc_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi32(s, a0, a1, ~a2);
- } else {
- tcg_out32(s, ANDC | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_andc_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi64(s, a0, a1, ~a2);
- } else {
- tcg_out32(s, ANDC | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_orc_i32:
- if (const_args[2]) {
- tcg_out_ori32(s, args[0], args[1], ~args[2]);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_orc_i64:
- tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_eqv_i32:
- if (const_args[2]) {
- tcg_out_xori32(s, args[0], args[1], ~args[2]);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_eqv_i64:
- tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
- break;
- case INDEX_op_clz_i32:
- tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctz_i32:
- tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctpop_i32:
- tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
- break;
- case INDEX_op_clz_i64:
- tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctz_i64:
- tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_ctpop_i64:
- tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
- break;
- case INDEX_op_mul_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out32(s, MULLI | TAI(a0, a1, a2));
- } else {
- tcg_out32(s, MULLW | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_div_i32:
- tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_divu_i32:
- tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_shl_i32:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shli32(s, args[0], args[1], args[2] & 31);
- } else {
- tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_shr_i32:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shri32(s, args[0], args[1], args[2] & 31);
- } else {
- tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_sar_i32:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
- } else {
- tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_rotl_i32:
- if (const_args[2]) {
- tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
- } else {
- tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
- | MB(0) | ME(31));
- }
- break;
- case INDEX_op_rotr_i32:
- if (const_args[2]) {
- tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
- } else {
- tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
- tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
- | MB(0) | ME(31));
- }
- break;
- case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), TCG_TYPE_I32);
- break;
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), TCG_TYPE_I64);
- break;
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args, const_args);
- break;
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
- break;
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
- break;
- case INDEX_op_add_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_64:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_64;
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
- case INDEX_op_shl_i64:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shli64(s, args[0], args[1], args[2] & 63);
- } else {
- tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_shr_i64:
- if (const_args[2]) {
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_shri64(s, args[0], args[1], args[2] & 63);
- } else {
- tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_sar_i64:
- if (const_args[2]) {
- int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
- tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
- } else {
- tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
- }
- break;
- case INDEX_op_rotl_i64:
- if (const_args[2]) {
- tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
- } else {
- tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
- }
- break;
- case INDEX_op_rotr_i64:
- if (const_args[2]) {
- tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
- } else {
- tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
- tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
- }
- break;
- case INDEX_op_mul_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out32(s, MULLI | TAI(a0, a1, a2));
- } else {
- tcg_out32(s, MULLD | TAB(a0, a1, a2));
- }
- break;
- case INDEX_op_div_i64:
- tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_divu_i64:
- tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, false);
- break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, true);
- break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args, false);
- break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, true);
- break;
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- c = EXTSB;
- goto gen_ext;
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- c = EXTSH;
- goto gen_ext;
- case INDEX_op_ext_i32_i64:
- case INDEX_op_ext32s_i64:
- c = EXTSW;
- goto gen_ext;
- gen_ext:
- tcg_out32(s, c | RS(args[1]) | RA(args[0]));
- break;
- case INDEX_op_extu_i32_i64:
- tcg_out_ext32u(s, args[0], args[1]);
- break;
- case INDEX_op_setcond_i32:
- tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
- const_args[2]);
- break;
- case INDEX_op_setcond_i64:
- tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
- const_args[2]);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args, const_args);
- break;
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- a0 = args[0], a1 = args[1];
- /* a1 = abcd */
- if (a0 != a1) {
- /* a0 = (a1 r<< 24) & 0xff # 000c */
- tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
- /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
- tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
- } else {
- /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
- /* a0 = (a1 r<< 24) & 0xff # 000c */
- tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
- /* a0 = a0 | r0 # 00dc */
- tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
- }
- break;
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- /* Stolen from gcc's builtin_bswap32 */
- a1 = args[1];
- a0 = args[0] == a1 ? TCG_REG_R0 : args[0];
- /* a1 = args[1] # abcd */
- /* a0 = rotate_left (a1, 8) # bcda */
- tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
- /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
- tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
- /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
- tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
- if (a0 == TCG_REG_R0) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
- case INDEX_op_bswap64_i64:
- a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
- if (a0 == a1) {
- a0 = TCG_REG_R0;
- a2 = a1;
- }
- /* a1 = # abcd efgh */
- /* a0 = rl32(a1, 8) # 0000 fghe */
- tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
- /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
- tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
- /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
- tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
- /* a0 = rl64(a0, 32) # hgfe 0000 */
- /* a2 = rl64(a1, 32) # efgh abcd */
- tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
- tcg_out_rld(s, RLDICL, a2, a1, 32, 0);
- /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
- tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
- /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
- tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
- /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
- tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);
- if (a0 == TCG_REG_R0) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
- case INDEX_op_deposit_i32:
- if (const_args[2]) {
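- /* A constant source is constrained to zero ("rZ"), so the
- deposit reduces to clearing the field; likewise below for i64. */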
- uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
- tcg_out_andi32(s, args[0], args[0], ~mask);
- } else {
- tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
- 32 - args[3] - args[4], 31 - args[3]);
- }
- break;
- case INDEX_op_deposit_i64:
- if (const_args[2]) {
- uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
- tcg_out_andi64(s, args[0], args[0], ~mask);
- } else {
- tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
- 64 - args[3] - args[4]);
- }
- break;
- case INDEX_op_extract_i32:
- tcg_out_rlw(s, RLWINM, args[0], args[1],
- 32 - args[2], 32 - args[3], 31);
- break;
- case INDEX_op_extract_i64:
- tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
- break;
- case INDEX_op_movcond_i32:
- tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
- args[3], args[4], const_args[2]);
- break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
- args[3], args[4], const_args[2]);
- break;
- #if TCG_TARGET_REG_BITS == 64
- case INDEX_op_add2_i64:
- #else
- case INDEX_op_add2_i32:
- #endif
- /* Note that the CA bit is defined based on the word size of the
- environment. So in 64-bit mode it's always carry-out of bit 63.
- The fallback code using deposit works just as well for 32-bit. */
- a0 = args[0], a1 = args[1];
- if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
- a0 = TCG_REG_R0;
- }
- if (const_args[4]) {
- tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
- } else {
- tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
- }
- if (const_args[5]) {
- tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
- } else {
- tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
- }
- if (a0 != args[0]) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
- #if TCG_TARGET_REG_BITS == 64
- case INDEX_op_sub2_i64:
- #else
- case INDEX_op_sub2_i32:
- #endif
- a0 = args[0], a1 = args[1];
- if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
- a0 = TCG_REG_R0;
- }
- if (const_args[2]) {
- tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
- } else {
- tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
- }
- if (const_args[3]) {
- tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
- } else {
- tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
- }
- if (a0 != args[0]) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
- case INDEX_op_muluh_i32:
- tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_mulsh_i32:
- tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_muluh_i64:
- tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_mulsh_i64:
- tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_mb:
- tcg_out_mb(s, args[0]);
- break;
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
- case INDEX_op_movi_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- default:
- tcg_abort();
- }
- }
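- /* Report vector op support: 1 = emitted directly, 0 = unsupported,
- -1 = supported via expansion in tcg_expand_vec_op below. */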
- int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
- {
- switch (opc) {
- case INDEX_op_and_vec:
- case INDEX_op_or_vec:
- case INDEX_op_xor_vec:
- case INDEX_op_andc_vec:
- case INDEX_op_not_vec:
- return 1;
- case INDEX_op_orc_vec:
- return have_isa_2_07;
- case INDEX_op_add_vec:
- case INDEX_op_sub_vec:
- case INDEX_op_smax_vec:
- case INDEX_op_smin_vec:
- case INDEX_op_umax_vec:
- case INDEX_op_umin_vec:
- case INDEX_op_shlv_vec:
- case INDEX_op_shrv_vec:
- case INDEX_op_sarv_vec:
- case INDEX_op_rotlv_vec:
- return vece <= MO_32 || have_isa_2_07;
- case INDEX_op_ssadd_vec:
- case INDEX_op_sssub_vec:
- case INDEX_op_usadd_vec:
- case INDEX_op_ussub_vec:
- return vece <= MO_32;
- case INDEX_op_cmp_vec:
- case INDEX_op_shli_vec:
- case INDEX_op_shri_vec:
- case INDEX_op_sari_vec:
- case INDEX_op_rotli_vec:
- return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
- case INDEX_op_neg_vec:
- return vece >= MO_32 && have_isa_3_00;
- case INDEX_op_mul_vec:
- switch (vece) {
- case MO_8:
- case MO_16:
- return -1;
- case MO_32:
- return have_isa_2_07 ? 1 : -1;
- }
- return 0;
- case INDEX_op_bitsel_vec:
- return have_vsx;
- case INDEX_op_rotrv_vec:
- return -1;
- default:
- return 0;
- }
- }
- static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
- TCGReg dst, TCGReg src)
- {
- tcg_debug_assert(dst >= TCG_REG_V0);
- /* Splat from integer reg allowed via constraints for v3.00. */
- if (src < TCG_REG_V0) {
- tcg_debug_assert(have_isa_3_00);
- switch (vece) {
- case MO_64:
- tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
- return true;
- case MO_32:
- tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
- return true;
- default:
- /* Fail, so that we fall back on either dupm or mov+dup. */
- return false;
- }
- }
- /*
- * Recall we use (or emulate) VSX integer loads, so the integer is
- * right justified within the left (zero-index) double-word.
- */
- switch (vece) {
- case MO_8:
- tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
- break;
- case MO_16:
- tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
- break;
- case MO_32:
- tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
- break;
- case MO_64:
- if (have_vsx) {
- tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
- break;
- }
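- /* Without VSX, splat dword 0 with two vsldoi: rotate src by 8 bytes so
- dword 0 lands in the low half, then splice that with src to place
- dword 0 in both halves of dst. */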
- tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
- tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
- break;
- default:
- g_assert_not_reached();
- }
- return true;
- }
- static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
- TCGReg out, TCGReg base, intptr_t offset)
- {
- int elt;
- tcg_debug_assert(out >= TCG_REG_V0);
- switch (vece) {
- case MO_8:
- if (have_isa_3_00) {
- tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
- } else {
- tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
- }
- elt = extract32(offset, 0, 4);
- #ifndef HOST_WORDS_BIGENDIAN
- elt ^= 15;
- #endif
- tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
- break;
- case MO_16:
- tcg_debug_assert((offset & 1) == 0);
- if (have_isa_3_00) {
- tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
- } else {
- tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
- }
- elt = extract32(offset, 1, 3);
- #ifndef HOST_WORDS_BIGENDIAN
- elt ^= 7;
- #endif
- tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
- break;
- case MO_32:
- if (have_isa_3_00) {
- tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
- break;
- }
- tcg_debug_assert((offset & 3) == 0);
- tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
- elt = extract32(offset, 2, 2);
- #ifndef HOST_WORDS_BIGENDIAN
- elt ^= 3;
- #endif
- tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
- break;
- case MO_64:
- if (have_vsx) {
- tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
- break;
- }
- tcg_debug_assert((offset & 7) == 0);
- tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
- tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
- elt = extract32(offset, 3, 1);
- #ifndef HOST_WORDS_BIGENDIAN
- elt = !elt;
- #endif
- if (elt) {
- tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
- } else {
- tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
- }
- break;
- default:
- g_assert_not_reached();
- }
- return true;
- }
- static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
- unsigned vecl, unsigned vece,
- const TCGArg *args, const int *const_args)
- {
- static const uint32_t
- add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
- sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
- neg_op[4] = { 0, 0, VNEGW, VNEGD },
- eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
- ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
- gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
- gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
- ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
- usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
- sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
- ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
- umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
- smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
- umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
- smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
- shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
- shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
- sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
- mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
- mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
- muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
- mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
- pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
- rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
- TCGType type = vecl + TCG_TYPE_V64;
- TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
- uint32_t insn;
- switch (opc) {
- case INDEX_op_ld_vec:
- tcg_out_ld(s, type, a0, a1, a2);
- return;
- case INDEX_op_st_vec:
- tcg_out_st(s, type, a0, a1, a2);
- return;
- case INDEX_op_dupm_vec:
- tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
- return;
- case INDEX_op_add_vec:
- insn = add_op[vece];
- break;
- case INDEX_op_sub_vec:
- insn = sub_op[vece];
- break;
- case INDEX_op_neg_vec:
- insn = neg_op[vece];
- a2 = a1;
- a1 = 0;
- break;
- case INDEX_op_mul_vec:
- tcg_debug_assert(vece == MO_32 && have_isa_2_07);
- insn = VMULUWM;
- break;
- case INDEX_op_ssadd_vec:
- insn = ssadd_op[vece];
- break;
- case INDEX_op_sssub_vec:
- insn = sssub_op[vece];
- break;
- case INDEX_op_usadd_vec:
- insn = usadd_op[vece];
- break;
- case INDEX_op_ussub_vec:
- insn = ussub_op[vece];
- break;
- case INDEX_op_smin_vec:
- insn = smin_op[vece];
- break;
- case INDEX_op_umin_vec:
- insn = umin_op[vece];
- break;
- case INDEX_op_smax_vec:
- insn = smax_op[vece];
- break;
- case INDEX_op_umax_vec:
- insn = umax_op[vece];
- break;
- case INDEX_op_shlv_vec:
- insn = shlv_op[vece];
- break;
- case INDEX_op_shrv_vec:
- insn = shrv_op[vece];
- break;
- case INDEX_op_sarv_vec:
- insn = sarv_op[vece];
- break;
- case INDEX_op_and_vec:
- insn = VAND;
- break;
- case INDEX_op_or_vec:
- insn = VOR;
- break;
- case INDEX_op_xor_vec:
- insn = VXOR;
- break;
- case INDEX_op_andc_vec:
- insn = VANDC;
- break;
- case INDEX_op_not_vec:
- insn = VNOR;
- a2 = a1;
- break;
- case INDEX_op_orc_vec:
- insn = VORC;
- break;
- case INDEX_op_cmp_vec:
- switch (args[3]) {
- case TCG_COND_EQ:
- insn = eq_op[vece];
- break;
- case TCG_COND_NE:
- insn = ne_op[vece];
- break;
- case TCG_COND_GT:
- insn = gts_op[vece];
- break;
- case TCG_COND_GTU:
- insn = gtu_op[vece];
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case INDEX_op_bitsel_vec:
- tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
- return;
- case INDEX_op_dup2_vec:
- assert(TCG_TARGET_REG_BITS == 32);
- /* With inputs a1 = xLxx, a2 = xHxx */
- tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */
- tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */
- tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */
- return;
- case INDEX_op_ppc_mrgh_vec:
- insn = mrgh_op[vece];
- break;
- case INDEX_op_ppc_mrgl_vec:
- insn = mrgl_op[vece];
- break;
- case INDEX_op_ppc_muleu_vec:
- insn = muleu_op[vece];
- break;
- case INDEX_op_ppc_mulou_vec:
- insn = mulou_op[vece];
- break;
- case INDEX_op_ppc_pkum_vec:
- insn = pkum_op[vece];
- break;
- case INDEX_op_rotlv_vec:
- insn = rotl_op[vece];
- break;
- case INDEX_op_ppc_msum_vec:
- tcg_debug_assert(vece == MO_16);
- tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
- return;
- case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
- case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
- case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
- default:
- g_assert_not_reached();
- }
- tcg_debug_assert(insn != 0);
- tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
- }
- static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGArg imm, TCGOpcode opci)
- {
- TCGv_vec t1 = tcg_temp_new_vec(type);
- /* Splat w/bytes for xxspltib. */
- tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1));
- vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
- tcgv_vec_arg(v1), tcgv_vec_arg(t1));
- tcg_temp_free_vec(t1);
- }
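- /* Altivec only provides EQ/GT/GTU compares natively (plus NE on ISA
- 3.0 for small elements); synthesize the other conditions by swapping
- the operands and/or inverting the result. */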
- static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2, TCGCond cond)
- {
- bool need_swap = false, need_inv = false;
- tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
- switch (cond) {
- case TCG_COND_EQ:
- case TCG_COND_GT:
- case TCG_COND_GTU:
- break;
- case TCG_COND_NE:
- if (have_isa_3_00 && vece <= MO_32) {
- break;
- }
- /* fall through */
- case TCG_COND_LE:
- case TCG_COND_LEU:
- need_inv = true;
- break;
- case TCG_COND_LT:
- case TCG_COND_LTU:
- need_swap = true;
- break;
- case TCG_COND_GE:
- case TCG_COND_GEU:
- need_swap = need_inv = true;
- break;
- default:
- g_assert_not_reached();
- }
- if (need_inv) {
- cond = tcg_invert_cond(cond);
- }
- if (need_swap) {
- TCGv_vec t1;
- t1 = v1, v1 = v2, v2 = t1;
- cond = tcg_swap_cond(cond);
- }
- vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
- tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
- if (need_inv) {
- tcg_gen_not_vec(vece, v0, v0);
- }
- }
- static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
- TCGv_vec v1, TCGv_vec v2)
- {
- TCGv_vec t1 = tcg_temp_new_vec(type);
- TCGv_vec t2 = tcg_temp_new_vec(type);
- TCGv_vec t3, t4;
- switch (vece) {
- case MO_8:
- case MO_16:
- vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
- tcgv_vec_arg(v1), tcgv_vec_arg(v2));
- vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
- tcgv_vec_arg(v1), tcgv_vec_arg(v2));
- vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
- tcgv_vec_arg(t1), tcgv_vec_arg(t2));
- vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
- tcgv_vec_arg(t1), tcgv_vec_arg(t2));
- vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
- tcgv_vec_arg(v0), tcgv_vec_arg(t1));
- break;
- case MO_32:
- tcg_debug_assert(!have_isa_2_07);
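- /* Build the 32-bit product from 16-bit multiplies:
- t2 = lo(v1) * lo(v2); t3 = msum(v1, half-swapped v2)
- = hi(v1)*lo(v2) + lo(v1)*hi(v2); result = t2 + (t3 << 16). */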
- t3 = tcg_temp_new_vec(type);
- t4 = tcg_temp_new_vec(type);
- tcg_gen_dupi_vec(MO_8, t4, -16);
- vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
- tcgv_vec_arg(v2), tcgv_vec_arg(t4));
- vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
- tcgv_vec_arg(v1), tcgv_vec_arg(v2));
- tcg_gen_dupi_vec(MO_8, t3, 0);
- vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3),
- tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
- vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3),
- tcgv_vec_arg(t3), tcgv_vec_arg(t4));
- tcg_gen_add_vec(MO_32, v0, t2, t3);
- tcg_temp_free_vec(t3);
- tcg_temp_free_vec(t4);
- break;
- default:
- g_assert_not_reached();
- }
- tcg_temp_free_vec(t1);
- tcg_temp_free_vec(t2);
- }
- void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
- TCGArg a0, ...)
- {
- va_list va;
- TCGv_vec v0, v1, v2, t0;
- TCGArg a2;
- va_start(va, a0);
- v0 = temp_tcgv_vec(arg_temp(a0));
- v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
- a2 = va_arg(va, TCGArg);
- switch (opc) {
- case INDEX_op_shli_vec:
- expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
- break;
- case INDEX_op_shri_vec:
- expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
- break;
- case INDEX_op_sari_vec:
- expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
- break;
- case INDEX_op_rotli_vec:
- expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
- break;
- case INDEX_op_cmp_vec:
- v2 = temp_tcgv_vec(arg_temp(a2));
- expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
- break;
- case INDEX_op_mul_vec:
- v2 = temp_tcgv_vec(arg_temp(a2));
- expand_vec_mul(type, vece, v0, v1, v2);
- break;
- case INDEX_op_rotlv_vec:
- v2 = temp_tcgv_vec(arg_temp(a2));
- t0 = tcg_temp_new_vec(type);
- tcg_gen_neg_vec(vece, t0, v2);
- tcg_gen_rotlv_vec(vece, v0, v1, t0);
- tcg_temp_free_vec(t0);
- break;
- default:
- g_assert_not_reached();
- }
- va_end(va);
- }
- static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
- {
- static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
- static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
- static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
- static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
- static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
- static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
- static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
- static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
- static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
- static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
- static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
- static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
- static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
- static const TCGTargetOpDef r_rI_ri
- = { .args_ct_str = { "r", "rI", "ri" } };
- static const TCGTargetOpDef r_rI_rT
- = { .args_ct_str = { "r", "rI", "rT" } };
- static const TCGTargetOpDef r_r_rZW
- = { .args_ct_str = { "r", "r", "rZW" } };
- static const TCGTargetOpDef L_L_L_L
- = { .args_ct_str = { "L", "L", "L", "L" } };
- static const TCGTargetOpDef S_S_S_S
- = { .args_ct_str = { "S", "S", "S", "S" } };
- static const TCGTargetOpDef movc
- = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
- static const TCGTargetOpDef dep
- = { .args_ct_str = { "r", "0", "rZ" } };
- static const TCGTargetOpDef br2
- = { .args_ct_str = { "r", "r", "ri", "ri" } };
- static const TCGTargetOpDef setc2
- = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
- static const TCGTargetOpDef add2
- = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
- static const TCGTargetOpDef sub2
- = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
- static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } };
- static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } };
- static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } };
- static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } };
- static const TCGTargetOpDef v_v_v_v
- = { .args_ct_str = { "v", "v", "v", "v" } };
- switch (op) {
- case INDEX_op_goto_ptr:
- return &r;
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_ctpop_i32:
- case INDEX_op_neg_i32:
- case INDEX_op_not_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap32_i32:
- case INDEX_op_extract_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- case INDEX_op_ctpop_i64:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i64:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap16_i64:
- case INDEX_op_bswap32_i64:
- case INDEX_op_bswap64_i64:
- case INDEX_op_extract_i64:
- return &r_r;
- case INDEX_op_add_i32:
- case INDEX_op_and_i32:
- case INDEX_op_or_i32:
- case INDEX_op_xor_i32:
- case INDEX_op_andc_i32:
- case INDEX_op_orc_i32:
- case INDEX_op_eqv_i32:
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i64:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
- case INDEX_op_setcond_i64:
- return &r_r_ri;
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- return &r_r_rI;
- case INDEX_op_div_i32:
- case INDEX_op_divu_i32:
- case INDEX_op_nand_i32:
- case INDEX_op_nor_i32:
- case INDEX_op_muluh_i32:
- case INDEX_op_mulsh_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_eqv_i64:
- case INDEX_op_nand_i64:
- case INDEX_op_nor_i64:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i64:
- case INDEX_op_mulsh_i64:
- case INDEX_op_muluh_i64:
- return &r_r_r;
- case INDEX_op_sub_i32:
- return &r_rI_ri;
- case INDEX_op_add_i64:
- return &r_r_rT;
- case INDEX_op_or_i64:
- case INDEX_op_xor_i64:
- return &r_r_rU;
- case INDEX_op_sub_i64:
- return &r_rI_rT;
- case INDEX_op_clz_i32:
- case INDEX_op_ctz_i32:
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i64:
- return &r_r_rZW;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return &r_ri;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return &movc;
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return &dep;
- case INDEX_op_brcond2_i32:
- return &br2;
- case INDEX_op_setcond2_i32:
- return &setc2;
- case INDEX_op_add2_i64:
- case INDEX_op_add2_i32:
- return &add2;
- case INDEX_op_sub2_i64:
- case INDEX_op_sub2_i32:
- return &sub2;
- case INDEX_op_qemu_ld_i32:
- return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
- ? &r_L : &r_L_L);
- case INDEX_op_qemu_st_i32:
- return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
- ? &S_S : &S_S_S);
- case INDEX_op_qemu_ld_i64:
- return (TCG_TARGET_REG_BITS == 64 ? &r_L
- : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
- case INDEX_op_qemu_st_i64:
- return (TCG_TARGET_REG_BITS == 64 ? &S_S
- : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);
- case INDEX_op_add_vec:
- case INDEX_op_sub_vec:
- case INDEX_op_mul_vec:
- case INDEX_op_and_vec:
- case INDEX_op_or_vec:
- case INDEX_op_xor_vec:
- case INDEX_op_andc_vec:
- case INDEX_op_orc_vec:
- case INDEX_op_cmp_vec:
- case INDEX_op_ssadd_vec:
- case INDEX_op_sssub_vec:
- case INDEX_op_usadd_vec:
- case INDEX_op_ussub_vec:
- case INDEX_op_smax_vec:
- case INDEX_op_smin_vec:
- case INDEX_op_umax_vec:
- case INDEX_op_umin_vec:
- case INDEX_op_shlv_vec:
- case INDEX_op_shrv_vec:
- case INDEX_op_sarv_vec:
- case INDEX_op_rotlv_vec:
- case INDEX_op_rotrv_vec:
- case INDEX_op_ppc_mrgh_vec:
- case INDEX_op_ppc_mrgl_vec:
- case INDEX_op_ppc_muleu_vec:
- case INDEX_op_ppc_mulou_vec:
- case INDEX_op_ppc_pkum_vec:
- case INDEX_op_dup2_vec:
- return &v_v_v;
- case INDEX_op_not_vec:
- case INDEX_op_neg_vec:
- return &v_v;
- case INDEX_op_dup_vec:
- return have_isa_3_00 ? &v_vr : &v_v;
- case INDEX_op_ld_vec:
- case INDEX_op_st_vec:
- case INDEX_op_dupm_vec:
- return &v_r;
- case INDEX_op_bitsel_vec:
- case INDEX_op_ppc_msum_vec:
- return &v_v_v_v;
- default:
- return NULL;
- }
- }
- static void tcg_target_init(TCGContext *s)
- {
- unsigned long hwcap = qemu_getauxval(AT_HWCAP);
- unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
- have_isa = tcg_isa_base;
- if (hwcap & PPC_FEATURE_ARCH_2_06) {
- have_isa = tcg_isa_2_06;
- }
- #ifdef PPC_FEATURE2_ARCH_2_07
- if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
- have_isa = tcg_isa_2_07;
- }
- #endif
- #ifdef PPC_FEATURE2_ARCH_3_00
- if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
- have_isa = tcg_isa_3_00;
- }
- #endif
- #ifdef PPC_FEATURE2_HAS_ISEL
- /* Prefer explicit instruction from the kernel. */
- have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
- #else
- /* Fall back to knowing Power7 (2.06) has ISEL. */
- have_isel = have_isa_2_06;
- #endif
- if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
- have_altivec = true;
- /* We only care about the portion of VSX that overlaps Altivec. */
- if (hwcap & PPC_FEATURE_HAS_VSX) {
- have_vsx = true;
- }
- }
- tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
- tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
- if (have_altivec) {
- tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
- tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
- }
- tcg_target_call_clobber_regs = 0;
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
- s->reserved_regs = 0;
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
- #if defined(_CALL_SYSV)
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
- #endif
- #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
- #endif
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
- tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
- tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
- if (USE_REG_TB) {
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
- }
- }
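The ISA level and vector availability are taken directly from the ELF auxiliary vector that the kernel passes to the process. Outside of QEMU the same two words can be read with glibc's getauxval(); the sketch below is a minimal standalone probe, assuming glibc's getauxval() from <sys/auxv.h> and a kernel recent enough to supply AT_HWCAP2. The individual PPC_FEATURE* and PPC_FEATURE2_* bits tested above come from Linux's asm/cputable.h.

/* Standalone sketch of reading the same auxiliary-vector words that
 * tcg_target_init() probes. */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
    unsigned long hwcap  = getauxval(AT_HWCAP);
    unsigned long hwcap2 = getauxval(AT_HWCAP2);

    printf("AT_HWCAP  = %#lx\n", hwcap);
    printf("AT_HWCAP2 = %#lx\n", hwcap2);
    return 0;
}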
- #ifdef __ELF__
- typedef struct {
- DebugFrameCIE cie;
- DebugFrameFDEHeader fde;
- uint8_t fde_def_cfa[4];
- uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
- } DebugFrame;
- /* We're expecting a 2 byte uleb128 encoded value. */
- QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
- #if TCG_TARGET_REG_BITS == 64
- # define ELF_HOST_MACHINE EM_PPC64
- #else
- # define ELF_HOST_MACHINE EM_PPC
- #endif
- static DebugFrame debug_frame = {
- .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
- .cie.id = -1,
- .cie.version = 1,
- .cie.code_align = 1,
- .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
- .cie.return_column = 65,
- /* Total FDE size does not include the "len" member. */
- .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
- .fde_def_cfa = {
- 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */
- (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
- (FRAME_SIZE >> 7)
- },
- .fde_reg_ofs = {
- /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
- 0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
- }
- };
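The two bytes after the DW_CFA_def_cfa opcode (12) encode FRAME_SIZE as a uleb128: the low seven bits with the continuation bit set, then the remaining bits, which is why the build-time assertion above requires FRAME_SIZE to fit in 14 bits. For reference, a generic uleb128 encoder, of which that fixed pair of bytes is the two-byte special case, could be sketched as:

/* Illustrative sketch of generic uleb128 encoding (DWARF variable-length
 * unsigned integers): 7 data bits per byte, high bit set on all but the
 * last byte. Returns the number of bytes written to out. */
#include <stdint.h>
#include <stddef.h>

static size_t encode_uleb128(uint32_t value, uint8_t *out)
{
    size_t n = 0;
    do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value) {
            byte |= 0x80;   /* more bytes follow */
        }
        out[n++] = byte;
    } while (value);
    return n;
}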
- void tcg_register_jit(void *buf, size_t buf_size)
- {
- uint8_t *p = &debug_frame.fde_reg_ofs[3];
- int i;
- for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
- p[0] = 0x80 + tcg_target_callee_save_regs[i];
- p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
- }
- debug_frame.fde.func_start = (uintptr_t)buf;
- debug_frame.fde.func_len = buf_size;
- tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
- }
- #endif /* __ELF__ */
- void flush_icache_range(uintptr_t start, uintptr_t stop)
- {
- uintptr_t p, start1, stop1;
- size_t dsize = qemu_dcache_linesize;
- size_t isize = qemu_icache_linesize;
- /* Write any dirty data-cache lines covering the range back to memory. */
- start1 = start & ~(dsize - 1);
- stop1 = (stop + dsize - 1) & ~(dsize - 1);
- for (p = start1; p < stop1; p += dsize) {
- asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
- }
- /* Order the writebacks before the icache invalidations. */
- asm volatile ("sync" : : : "memory");
- /* Invalidate the instruction-cache lines covering the range. */
- start1 = start & ~(isize - 1);
- stop1 = (stop + isize - 1) & ~(isize - 1);
- for (p = start1; p < stop1; p += isize) {
- asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
- }
- /* Ensure the invalidations complete before any further instruction fetch. */
- asm volatile ("sync" : : : "memory");
- asm volatile ("isync" : : : "memory");
- }
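Usage sketch, with hypothetical names: after the JIT has written or patched translated code, the touched range must be flushed before it is executed.

/* code_buf and code_size are placeholders for the generated-code region. */
flush_icache_range((uintptr_t)code_buf, (uintptr_t)code_buf + code_size);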