- /*
- * Generic vector operation expansion
- *
- * Copyright (c) 2018 Linaro
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
- #include "qemu/osdep.h"
- #include "tcg/tcg.h"
- #include "tcg/tcg-temp-internal.h"
- #include "tcg/tcg-op-common.h"
- #include "tcg/tcg-op-gvec-common.h"
- #include "tcg/tcg-gvec-desc.h"
- #include "tcg-has.h"
- #define MAX_UNROLL 4
- #ifdef CONFIG_DEBUG_TCG
- static const TCGOpcode vecop_list_empty[1] = { 0 };
- #else
- #define vecop_list_empty NULL
- #endif
- /* Verify vector size and alignment rules. OFS should be the OR of all
- of the operand offsets so that we can check them all at once. */
- static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
- {
- uint32_t max_align;
- switch (oprsz) {
- case 8:
- case 16:
- case 32:
- tcg_debug_assert(oprsz <= maxsz);
- break;
- default:
- tcg_debug_assert(oprsz == maxsz);
- break;
- }
- tcg_debug_assert(maxsz <= (8 << SIMD_MAXSZ_BITS));
- max_align = maxsz >= 16 ? 15 : 7;
- tcg_debug_assert((maxsz & max_align) == 0);
- tcg_debug_assert((ofs & max_align) == 0);
- }
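- /*
-  * For illustration (values chosen here, not taken from any target):
-  * oprsz = 8 with maxsz = 32 passes, since 8 is one of {8,16,32} and
-  * is <= maxsz; because maxsz >= 16, maxsz and every offset must then
-  * be 16-byte aligned.  An oprsz of 24 is accepted only when it equals
-  * maxsz.
-  */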
- /* Verify vector overlap rules for two operands. */
- static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
- {
- tcg_debug_assert(d == a || d + s <= a || a + s <= d);
- }
- /* Verify vector overlap rules for three operands. */
- static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
- {
- check_overlap_2(d, a, s);
- check_overlap_2(d, b, s);
- check_overlap_2(a, b, s);
- }
- /* Verify vector overlap rules for four operands. */
- static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
- uint32_t c, uint32_t s)
- {
- check_overlap_2(d, a, s);
- check_overlap_2(d, b, s);
- check_overlap_2(d, c, s);
- check_overlap_2(a, b, s);
- check_overlap_2(a, c, s);
- check_overlap_2(b, c, s);
- }
- /* Create a descriptor from components. */
- uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
- {
- uint32_t desc = 0;
- check_size_align(oprsz, maxsz, 0);
- /*
- * We want to check that 'data' will fit into SIMD_DATA_BITS.
- * However, some callers want to treat the data as a signed
- * value (which they can later get back with simd_data())
- * and some want to treat it as an unsigned value.
- * So here we assert only that the data will fit into the
- * field in at least one way. This means that some invalid
- * values from the caller will not be detected, e.g. if the
- * caller wants to handle the value as a signed integer but
- * incorrectly passes us 1 << (SIMD_DATA_BITS - 1).
- */
- tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) ||
- data == extract32(data, 0, SIMD_DATA_BITS));
- oprsz = (oprsz / 8) - 1;
- maxsz = (maxsz / 8) - 1;
- /*
- * We have just asserted in check_size_align that either
- * oprsz is {8,16,32} or matches maxsz. Encode the final
- * case with '2', as that would otherwise map to 24.
- */
- if (oprsz == maxsz) {
- oprsz = 2;
- }
- desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
- desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
- desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);
- return desc;
- }
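- /*
-  * Worked example (illustrative sizes): oprsz = 16, maxsz = 64, data = 0
-  * encodes the OPRSZ field as 16 / 8 - 1 = 1 and the MAXSZ field as
-  * 64 / 8 - 1 = 7.  With oprsz == maxsz == 80, both fields would be 9,
-  * so the OPRSZ field is stored as 2 per the special case above.
-  */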
- /* Generate a call to a gvec-style helper with two vector operands. */
- void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_2 *fn)
- {
- TCGv_ptr a0, a1;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- fn(a0, a1, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- }
- /* Generate a call to a gvec-style helper with two vector operands
- and one scalar operand. */
- void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_2i *fn)
- {
- TCGv_ptr a0, a1;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- fn(a0, a1, c, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- }
- /* Generate a call to a gvec-style helper with three vector operands. */
- void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_3 *fn)
- {
- TCGv_ptr a0, a1, a2;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- a2 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
- fn(a0, a1, a2, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_ptr(a2);
- }
- /* Generate a call to a gvec-style helper with four vector operands. */
- void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
- int32_t data, gen_helper_gvec_4 *fn)
- {
- TCGv_ptr a0, a1, a2, a3;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- a2 = tcg_temp_ebb_new_ptr();
- a3 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
- tcg_gen_addi_ptr(a3, tcg_env, cofs);
- fn(a0, a1, a2, a3, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_ptr(a2);
- tcg_temp_free_ptr(a3);
- }
- /* Generate a call to a gvec-style helper with five vector operands. */
- void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t xofs, uint32_t oprsz,
- uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
- {
- TCGv_ptr a0, a1, a2, a3, a4;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- a2 = tcg_temp_ebb_new_ptr();
- a3 = tcg_temp_ebb_new_ptr();
- a4 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
- tcg_gen_addi_ptr(a3, tcg_env, cofs);
- tcg_gen_addi_ptr(a4, tcg_env, xofs);
- fn(a0, a1, a2, a3, a4, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_ptr(a2);
- tcg_temp_free_ptr(a3);
- tcg_temp_free_ptr(a4);
- }
- /* Generate a call to a gvec-style helper with two vector operands
- and an extra pointer operand. */
- void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
- TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
- int32_t data, gen_helper_gvec_2_ptr *fn)
- {
- TCGv_ptr a0, a1;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- fn(a0, a1, ptr, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- }
- /* Generate a call to a gvec-style helper with three vector operands
- and an extra pointer operand. */
- void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
- int32_t data, gen_helper_gvec_3_ptr *fn)
- {
- TCGv_ptr a0, a1, a2;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- a2 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
- fn(a0, a1, a2, ptr, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_ptr(a2);
- }
- /* Generate a call to a gvec-style helper with four vector operands
- and an extra pointer operand. */
- void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
- uint32_t maxsz, int32_t data,
- gen_helper_gvec_4_ptr *fn)
- {
- TCGv_ptr a0, a1, a2, a3;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- a2 = tcg_temp_ebb_new_ptr();
- a3 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
- tcg_gen_addi_ptr(a3, tcg_env, cofs);
- fn(a0, a1, a2, a3, ptr, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_ptr(a2);
- tcg_temp_free_ptr(a3);
- }
- /* Generate a call to a gvec-style helper with five vector operands
- and an extra pointer operand. */
- void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_5_ptr *fn)
- {
- TCGv_ptr a0, a1, a2, a3, a4;
- TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
- a0 = tcg_temp_ebb_new_ptr();
- a1 = tcg_temp_ebb_new_ptr();
- a2 = tcg_temp_ebb_new_ptr();
- a3 = tcg_temp_ebb_new_ptr();
- a4 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- tcg_gen_addi_ptr(a2, tcg_env, bofs);
- tcg_gen_addi_ptr(a3, tcg_env, cofs);
- tcg_gen_addi_ptr(a4, tcg_env, eofs);
- fn(a0, a1, a2, a3, a4, ptr, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_ptr(a2);
- tcg_temp_free_ptr(a3);
- tcg_temp_free_ptr(a4);
- }
- /* Return true if we want to implement something of OPRSZ bytes
- in units of LNSZ. This limits the expansion of inline code. */
- static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
- {
- uint32_t q, r;
- if (oprsz < lnsz) {
- return false;
- }
- q = oprsz / lnsz;
- r = oprsz % lnsz;
- tcg_debug_assert((r & 7) == 0);
- if (lnsz < 16) {
- /* For sizes below 16, accept no remainder. */
- if (r != 0) {
- return false;
- }
- } else {
- /*
- * Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- * In addition, expand_clr needs to handle a multiple of 8.
- * Thus we can handle the tail with one more operation per
- * diminishing power of 2.
- */
- q += ctpop32(r);
- }
- return q <= MAX_UNROLL;
- }
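- /*
-  * For example (illustrative sizes): oprsz = 80 with lnsz = 32 gives
-  * q = 2, r = 16 and ctpop32(16) = 1, so the expansion costs 3
-  * operations and is accepted (3 <= MAX_UNROLL).  oprsz = 80 with
-  * lnsz = 8 would cost 10 operations and is rejected.
-  */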
- static void expand_clr(uint32_t dofs, uint32_t maxsz);
- /* Duplicate C as per VECE. */
- uint64_t (dup_const)(unsigned vece, uint64_t c)
- {
- switch (vece) {
- case MO_8:
- return 0x0101010101010101ull * (uint8_t)c;
- case MO_16:
- return 0x0001000100010001ull * (uint16_t)c;
- case MO_32:
- return 0x0000000100000001ull * (uint32_t)c;
- case MO_64:
- return c;
- default:
- g_assert_not_reached();
- }
- }
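- /*
-  * For example: dup_const(MO_8, 0xab) == 0xababababababababull and
-  * dup_const(MO_16, 0x1234) == 0x1234123412341234ull.
-  */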
- /* Duplicate IN into OUT as per VECE. */
- void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
- {
- switch (vece) {
- case MO_8:
- tcg_gen_ext8u_i32(out, in);
- tcg_gen_muli_i32(out, out, 0x01010101);
- break;
- case MO_16:
- tcg_gen_deposit_i32(out, in, in, 16, 16);
- break;
- case MO_32:
- tcg_gen_mov_i32(out, in);
- break;
- default:
- g_assert_not_reached();
- }
- }
- void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
- {
- switch (vece) {
- case MO_8:
- tcg_gen_ext8u_i64(out, in);
- tcg_gen_muli_i64(out, out, 0x0101010101010101ull);
- break;
- case MO_16:
- tcg_gen_ext16u_i64(out, in);
- tcg_gen_muli_i64(out, out, 0x0001000100010001ull);
- break;
- case MO_32:
- tcg_gen_deposit_i64(out, in, in, 32, 32);
- break;
- case MO_64:
- tcg_gen_mov_i64(out, in);
- break;
- default:
- g_assert_not_reached();
- }
- }
- /* Select a supported vector type for implementing an operation on SIZE
- * bytes. If OP is 0, assume that the real operation to be performed is
- * required by all backends. Otherwise, make sure that OP can be performed
- * on elements of size VECE in the selected type. Do not select V64 if
- * PREFER_I64 is true. Return 0 if no vector type is selected.
- */
- static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
- uint32_t size, bool prefer_i64)
- {
- /*
- * Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- * It is hard to imagine a case in which v256 is supported
- * but v128 is not, but check anyway.
- * In addition, expand_clr needs to handle a multiple of 8.
- */
- if (TCG_TARGET_HAS_v256 &&
- check_size_impl(size, 32) &&
- tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece) &&
- (!(size & 16) ||
- (TCG_TARGET_HAS_v128 &&
- tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) &&
- (!(size & 8) ||
- (TCG_TARGET_HAS_v64 &&
- tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
- return TCG_TYPE_V256;
- }
- if (TCG_TARGET_HAS_v128 &&
- check_size_impl(size, 16) &&
- tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece) &&
- (!(size & 8) ||
- (TCG_TARGET_HAS_v64 &&
- tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
- return TCG_TYPE_V128;
- }
- if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
- && tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)) {
- return TCG_TYPE_V64;
- }
- return 0;
- }
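- /*
-  * For instance (illustrative host capabilities): with size == 80,
-  * TCG_TYPE_V256 is selected when check_size_impl(80, 32) holds
-  * (2x32 + 1x16 = 3 operations) and the 16-byte tail can also be
-  * emitted with V128.  With only V128 available, check_size_impl(80, 16)
-  * would require 5 operations and fails, so 0 is returned and the
-  * caller falls back to integers or an out-of-line helper.
-  */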
- static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_vec t_vec)
- {
- uint32_t i = 0;
- tcg_debug_assert(oprsz >= 8);
- /*
- * This may be expand_clr for the tail of an operation, e.g.
- * oprsz == 8 && maxsz == 64. The first 8 bytes of this store
- * are misaligned wrt the maximum vector size, so do that first.
- */
- if (dofs & 8) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
- i += 8;
- }
- switch (type) {
- case TCG_TYPE_V256:
- /*
- * Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- for (; i + 32 <= oprsz; i += 32) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256);
- }
- /* fallthru */
- case TCG_TYPE_V128:
- for (; i + 16 <= oprsz; i += 16) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128);
- }
- break;
- case TCG_TYPE_V64:
- for (; i < oprsz; i += 8) {
- tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
- }
- break;
- default:
- g_assert_not_reached();
- }
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
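- /*
-  * For example (illustrative values): oprsz == 24 at a dofs with bit 3
-  * set and type == TCG_TYPE_V128 first stores 8 bytes to restore
-  * 16-byte alignment, then a single V128 store covers the remaining
-  * 16 bytes.
-  */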
- /* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
- * Only one of IN_32 or IN_64 may be set;
- * IN_C is used if IN_32 and IN_64 are unset.
- */
- static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
- uint64_t in_c)
- {
- TCGType type;
- TCGv_i64 t_64;
- TCGv_i32 t_32, t_desc;
- TCGv_ptr t_ptr;
- uint32_t i;
- assert(vece <= (in_32 ? MO_32 : MO_64));
- assert(in_32 == NULL || in_64 == NULL);
- /* If we're storing 0, expand oprsz to maxsz. */
- if (in_32 == NULL && in_64 == NULL) {
- in_c = dup_const(vece, in_c);
- if (in_c == 0) {
- oprsz = maxsz;
- vece = MO_8;
- } else if (in_c == dup_const(MO_8, in_c)) {
- vece = MO_8;
- }
- }
- /* Implement inline with a vector type, if possible.
- * Prefer integer when 64-bit host and no variable dup.
- */
- type = choose_vector_type(NULL, vece, oprsz,
- (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
- && (in_64 == NULL || vece == MO_64)));
- if (type != 0) {
- TCGv_vec t_vec = tcg_temp_new_vec(type);
- if (in_32) {
- tcg_gen_dup_i32_vec(vece, t_vec, in_32);
- } else if (in_64) {
- tcg_gen_dup_i64_vec(vece, t_vec, in_64);
- } else {
- tcg_gen_dupi_vec(vece, t_vec, in_c);
- }
- do_dup_store(type, dofs, oprsz, maxsz, t_vec);
- return;
- }
- /* Otherwise, inline with an integer type, unless "large". */
- if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) {
- t_64 = NULL;
- t_32 = NULL;
- if (in_32) {
- /* We are given a 32-bit variable input. For a 64-bit host,
- use a 64-bit operation unless the 32-bit operation would
- be simple enough. */
- if (TCG_TARGET_REG_BITS == 64
- && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
- t_64 = tcg_temp_ebb_new_i64();
- tcg_gen_extu_i32_i64(t_64, in_32);
- tcg_gen_dup_i64(vece, t_64, t_64);
- } else {
- t_32 = tcg_temp_ebb_new_i32();
- tcg_gen_dup_i32(vece, t_32, in_32);
- }
- } else if (in_64) {
- /* We are given a 64-bit variable input. */
- t_64 = tcg_temp_ebb_new_i64();
- tcg_gen_dup_i64(vece, t_64, in_64);
- } else {
- /* We are given a constant input. */
- /* For 64-bit hosts, use 64-bit constants for "simple" constants
- or when we'd need too many 32-bit stores, or when a 64-bit
- constant is really required. */
- if (vece == MO_64
- || (TCG_TARGET_REG_BITS == 64
- && (in_c == 0 || in_c == -1
- || !check_size_impl(oprsz, 4)))) {
- t_64 = tcg_constant_i64(in_c);
- } else {
- t_32 = tcg_constant_i32(in_c);
- }
- }
- /* Implement inline if we picked an implementation size above. */
- if (t_32) {
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_st_i32(t_32, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t_32);
- goto done;
- }
- if (t_64) {
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_st_i64(t_64, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t_64);
- goto done;
- }
- }
- /* Otherwise implement out of line. */
- t_ptr = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(t_ptr, tcg_env, dofs);
- /*
- * This may be expand_clr for the tail of an operation, e.g.
- * oprsz == 8 && maxsz == 64. The size of the clear is misaligned
- * wrt simd_desc and will assert. Simply pass all replicated byte
- * stores through to memset.
- */
- if (oprsz == maxsz && vece == MO_8) {
- TCGv_ptr t_size = tcg_constant_ptr(oprsz);
- TCGv_i32 t_val;
- if (in_32) {
- t_val = in_32;
- } else if (in_64) {
- t_val = tcg_temp_ebb_new_i32();
- tcg_gen_extrl_i64_i32(t_val, in_64);
- } else {
- t_val = tcg_constant_i32(in_c);
- }
- gen_helper_memset(t_ptr, t_ptr, t_val, t_size);
- if (in_64) {
- tcg_temp_free_i32(t_val);
- }
- tcg_temp_free_ptr(t_ptr);
- return;
- }
- t_desc = tcg_constant_i32(simd_desc(oprsz, maxsz, 0));
- if (vece == MO_64) {
- if (in_64) {
- gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
- } else {
- t_64 = tcg_constant_i64(in_c);
- gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
- }
- } else {
- typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);
- static dup_fn * const fns[3] = {
- gen_helper_gvec_dup8,
- gen_helper_gvec_dup16,
- gen_helper_gvec_dup32
- };
- if (in_32) {
- fns[vece](t_ptr, t_desc, in_32);
- } else if (in_64) {
- t_32 = tcg_temp_ebb_new_i32();
- tcg_gen_extrl_i64_i32(t_32, in_64);
- fns[vece](t_ptr, t_desc, t_32);
- tcg_temp_free_i32(t_32);
- } else {
- if (vece == MO_8) {
- in_c &= 0xff;
- } else if (vece == MO_16) {
- in_c &= 0xffff;
- }
- t_32 = tcg_constant_i32(in_c);
- fns[vece](t_ptr, t_desc, t_32);
- }
- }
- tcg_temp_free_ptr(t_ptr);
- return;
- done:
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- /* Likewise, but with zero. */
- static void expand_clr(uint32_t dofs, uint32_t maxsz)
- {
- do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
- }
- /* Expand OPRSZ bytes worth of two-operand operations using i32 elements. */
- static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- if (load_dest) {
- tcg_gen_ld_i32(t1, tcg_env, dofs + i);
- }
- fni(t1, t0);
- tcg_gen_st_i32(t1, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
- static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- int32_t c, bool load_dest,
- void (*fni)(TCGv_i32, TCGv_i32, int32_t))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- if (load_dest) {
- tcg_gen_ld_i32(t1, tcg_env, dofs + i);
- }
- fni(t1, t0, c);
- tcg_gen_st_i32(t1, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
- static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- TCGv_i32 c, bool scalar_first,
- void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- if (scalar_first) {
- fni(t1, c, t0);
- } else {
- fni(t1, t0, c);
- }
- tcg_gen_st_i32(t1, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
- /* Expand OPRSZ bytes worth of three-operand operations using i32 elements. */
- static void expand_3_i32(uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, bool load_dest,
- void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- tcg_gen_ld_i32(t1, tcg_env, bofs + i);
- if (load_dest) {
- tcg_gen_ld_i32(t2, tcg_env, dofs + i);
- }
- fni(t2, t0, t1);
- tcg_gen_st_i32(t2, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t0);
- }
- static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, int32_t c,
- bool load_dest, bool write_aofs,
- void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- tcg_gen_ld_i32(t1, tcg_env, bofs + i);
- if (load_dest) {
- tcg_gen_ld_i32(t2, tcg_env, dofs + i);
- }
- fni(t2, t0, t1, c);
- tcg_gen_st_i32(t2, tcg_env, dofs + i);
- if (write_aofs) {
- tcg_gen_st_i32(t0, tcg_env, aofs + i);
- }
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- }
- /* Expand OPRSZ bytes worth of four-operand operations using i32 elements. */
- static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t oprsz, bool write_aofs,
- void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- TCGv_i32 t3 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t1, tcg_env, aofs + i);
- tcg_gen_ld_i32(t2, tcg_env, bofs + i);
- tcg_gen_ld_i32(t3, tcg_env, cofs + i);
- fni(t0, t1, t2, t3);
- tcg_gen_st_i32(t0, tcg_env, dofs + i);
- if (write_aofs) {
- tcg_gen_st_i32(t1, tcg_env, aofs + i);
- }
- }
- tcg_temp_free_i32(t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t0);
- }
- static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t oprsz, int32_t c,
- void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32,
- int32_t))
- {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- TCGv_i32 t3 = tcg_temp_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t1, tcg_env, aofs + i);
- tcg_gen_ld_i32(t2, tcg_env, bofs + i);
- tcg_gen_ld_i32(t3, tcg_env, cofs + i);
- fni(t0, t1, t2, t3, c);
- tcg_gen_st_i32(t0, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t0);
- }
- /* Expand OPRSZ bytes worth of two-operand operations using i64 elements. */
- static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- if (load_dest) {
- tcg_gen_ld_i64(t1, tcg_env, dofs + i);
- }
- fni(t1, t0);
- tcg_gen_st_i64(t1, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
- static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- int64_t c, bool load_dest,
- void (*fni)(TCGv_i64, TCGv_i64, int64_t))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- if (load_dest) {
- tcg_gen_ld_i64(t1, tcg_env, dofs + i);
- }
- fni(t1, t0, c);
- tcg_gen_st_i64(t1, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
- static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- TCGv_i64 c, bool scalar_first,
- void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- if (scalar_first) {
- fni(t1, c, t0);
- } else {
- fni(t1, t0, c);
- }
- tcg_gen_st_i64(t1, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
- /* Expand OPRSZ bytes worth of three-operand operations using i64 elements. */
- static void expand_3_i64(uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, bool load_dest,
- void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- tcg_gen_ld_i64(t1, tcg_env, bofs + i);
- if (load_dest) {
- tcg_gen_ld_i64(t2, tcg_env, dofs + i);
- }
- fni(t2, t0, t1);
- tcg_gen_st_i64(t2, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
- }
- static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, int64_t c,
- bool load_dest, bool write_aofs,
- void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- tcg_gen_ld_i64(t1, tcg_env, bofs + i);
- if (load_dest) {
- tcg_gen_ld_i64(t2, tcg_env, dofs + i);
- }
- fni(t2, t0, t1, c);
- tcg_gen_st_i64(t2, tcg_env, dofs + i);
- if (write_aofs) {
- tcg_gen_st_i64(t0, tcg_env, aofs + i);
- }
- }
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- }
- /* Expand OPRSZ bytes worth of four-operand operations using i64 elements. */
- static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t oprsz, bool write_aofs,
- void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- TCGv_i64 t3 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t1, tcg_env, aofs + i);
- tcg_gen_ld_i64(t2, tcg_env, bofs + i);
- tcg_gen_ld_i64(t3, tcg_env, cofs + i);
- fni(t0, t1, t2, t3);
- tcg_gen_st_i64(t0, tcg_env, dofs + i);
- if (write_aofs) {
- tcg_gen_st_i64(t1, tcg_env, aofs + i);
- }
- }
- tcg_temp_free_i64(t3);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
- }
- static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t oprsz, int64_t c,
- void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64,
- int64_t))
- {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- TCGv_i64 t3 = tcg_temp_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t1, tcg_env, aofs + i);
- tcg_gen_ld_i64(t2, tcg_env, bofs + i);
- tcg_gen_ld_i64(t3, tcg_env, cofs + i);
- fni(t0, t1, t2, t3, c);
- tcg_gen_st_i64(t0, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t3);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
- }
- /* Expand OPRSZ bytes worth of two-operand operations using host vectors. */
- static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t tysz, TCGType type,
- bool load_dest,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- if (load_dest) {
- tcg_gen_ld_vec(t1, tcg_env, dofs + i);
- }
- fni(vece, t1, t0);
- tcg_gen_st_vec(t1, tcg_env, dofs + i);
- }
- }
- /* Expand OPRSZ bytes worth of two-vector operands and an immediate operand
- using host vectors. */
- static void expand_2i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t tysz, TCGType type,
- int64_t c, bool load_dest,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec, int64_t))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- if (load_dest) {
- tcg_gen_ld_vec(t1, tcg_env, dofs + i);
- }
- fni(vece, t1, t0, c);
- tcg_gen_st_vec(t1, tcg_env, dofs + i);
- }
- }
- static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t tysz, TCGType type,
- TCGv_vec c, bool scalar_first,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- if (scalar_first) {
- fni(vece, t1, c, t0);
- } else {
- fni(vece, t1, t0, c);
- }
- tcg_gen_st_vec(t1, tcg_env, dofs + i);
- }
- }
- /* Expand OPRSZ bytes worth of three-operand operations using host vectors. */
- static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz,
- uint32_t tysz, TCGType type, bool load_dest,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- TCGv_vec t2 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- tcg_gen_ld_vec(t1, tcg_env, bofs + i);
- if (load_dest) {
- tcg_gen_ld_vec(t2, tcg_env, dofs + i);
- }
- fni(vece, t2, t0, t1);
- tcg_gen_st_vec(t2, tcg_env, dofs + i);
- }
- }
- /*
- * Expand OPRSZ bytes worth of three-vector operands and an immediate operand
- * using host vectors.
- */
- static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t tysz,
- TCGType type, int64_t c,
- bool load_dest, bool write_aofs,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
- int64_t))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- TCGv_vec t2 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- tcg_gen_ld_vec(t1, tcg_env, bofs + i);
- if (load_dest) {
- tcg_gen_ld_vec(t2, tcg_env, dofs + i);
- }
- fni(vece, t2, t0, t1, c);
- tcg_gen_st_vec(t2, tcg_env, dofs + i);
- if (write_aofs) {
- tcg_gen_st_vec(t0, tcg_env, aofs + i);
- }
- }
- }
- /* Expand OPRSZ bytes worth of four-operand operations using host vectors. */
- static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t cofs, uint32_t oprsz,
- uint32_t tysz, TCGType type, bool write_aofs,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec,
- TCGv_vec, TCGv_vec))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- TCGv_vec t2 = tcg_temp_new_vec(type);
- TCGv_vec t3 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t1, tcg_env, aofs + i);
- tcg_gen_ld_vec(t2, tcg_env, bofs + i);
- tcg_gen_ld_vec(t3, tcg_env, cofs + i);
- fni(vece, t0, t1, t2, t3);
- tcg_gen_st_vec(t0, tcg_env, dofs + i);
- if (write_aofs) {
- tcg_gen_st_vec(t1, tcg_env, aofs + i);
- }
- }
- }
- /*
- * Expand OPRSZ bytes worth of four-vector operands and an immediate operand
- * using host vectors.
- */
- static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t cofs, uint32_t oprsz,
- uint32_t tysz, TCGType type, int64_t c,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec,
- TCGv_vec, TCGv_vec, int64_t))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- TCGv_vec t2 = tcg_temp_new_vec(type);
- TCGv_vec t3 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t1, tcg_env, aofs + i);
- tcg_gen_ld_vec(t2, tcg_env, bofs + i);
- tcg_gen_ld_vec(t3, tcg_env, cofs + i);
- fni(vece, t0, t1, t2, t3, c);
- tcg_gen_st_vec(t0, tcg_env, dofs + i);
- }
- }
- /* Expand a vector two-operand operation. */
- void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
- {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
- g->load_dest, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
- g->load_dest, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
- g->load_dest, g->fniv);
- break;
- case 0:
- if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
- } else {
- assert(g->fno != NULL);
- tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
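- /*
-  * Sketch of a typical caller of this entry point (the expander for a
-  * bitwise NOT, for example, is wired up roughly like this; treat the
-  * exact field values as illustrative):
-  *
-  *     static const GVecGen2 g = {
-  *         .fni8 = tcg_gen_not_i64,
-  *         .fniv = tcg_gen_not_vec,
-  *         .fno = gen_helper_gvec_not,
-  *         .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-  *     };
-  *     tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
-  */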
- /* Expand a vector operation with two vectors and an immediate. */
- void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, int64_t c, const GVecGen2i *g)
- {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
- c, g->load_dest, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
- c, g->load_dest, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
- c, g->load_dest, g->fniv);
- break;
- case 0:
- if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
- } else {
- if (g->fno) {
- tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
- } else {
- TCGv_i64 tcg_c = tcg_constant_i64(c);
- tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
- maxsz, c, g->fnoi);
- }
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- /* Expand a vector operation with two vectors and a scalar. */
- void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g)
- {
- TCGType type;
- check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- if (type != 0) {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGv_vec t_vec = tcg_temp_new_vec(type);
- uint32_t some;
- tcg_gen_dup_i64_vec(g->vece, t_vec, c);
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
- t_vec, g->scalar_first, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_2s_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
- t_vec, g->scalar_first, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_2s_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
- t_vec, g->scalar_first, g->fniv);
- break;
- default:
- g_assert_not_reached();
- }
- tcg_temp_free_vec(t_vec);
- tcg_swap_vecop_list(hold_list);
- } else if (g->fni8 && check_size_impl(oprsz, 8)) {
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_dup_i64(g->vece, t64, c);
- expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
- tcg_temp_free_i64(t64);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(t32, c);
- tcg_gen_dup_i32(g->vece, t32, t32);
- expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
- tcg_temp_free_i32(t32);
- } else {
- tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, 0, g->fno);
- return;
- }
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- /* Expand a vector three-operand operation. */
- void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
- {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
- g->load_dest, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- bofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
- g->load_dest, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
- g->load_dest, g->fniv);
- break;
- case 0:
- if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
- } else {
- assert(g->fno != NULL);
- tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
- maxsz, g->data, g->fno);
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
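- /*
-  * Sketch of a three-operand caller (an MO_8 addition, roughly; the
-  * opt_opc list is assumed to be declared elsewhere in this file):
-  *
-  *     static const GVecGen3 g = {
-  *         .fni8 = tcg_gen_vec_add8_i64,
-  *         .fniv = tcg_gen_add_vec,
-  *         .fno = gen_helper_gvec_add8,
-  *         .opt_opc = vecop_list_add,
-  *         .vece = MO_8,
-  *     };
-  *     tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
-  */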
- /* Expand a vector operation with three vectors and an immediate. */
- void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, int64_t c,
- const GVecGen3i *g)
- {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- switch (type) {
- case TCG_TYPE_V256:
- /*
- * Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
- c, g->load_dest, g->write_aofs, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- bofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
- c, g->load_dest, g->write_aofs, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
- c, g->load_dest, g->write_aofs, g->fniv);
- break;
- case 0:
- if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_3i_i64(dofs, aofs, bofs, oprsz, c,
- g->load_dest, g->write_aofs, g->fni8);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_3i_i32(dofs, aofs, bofs, oprsz, c,
- g->load_dest, g->write_aofs, g->fni4);
- } else {
- assert(g->fno != NULL);
- tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- /* Expand a vector four-operand operation. */
- void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
- {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
- check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
- 32, TCG_TYPE_V256, g->write_aofs, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- bofs += some;
- cofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
- 16, TCG_TYPE_V128, g->write_aofs, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
- 8, TCG_TYPE_V64, g->write_aofs, g->fniv);
- break;
- case 0:
- if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_4_i64(dofs, aofs, bofs, cofs, oprsz,
- g->write_aofs, g->fni8);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_4_i32(dofs, aofs, bofs, cofs, oprsz,
- g->write_aofs, g->fni4);
- } else {
- assert(g->fno != NULL);
- tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
- oprsz, maxsz, g->data, g->fno);
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- /* Expand a vector operation with four vectors and an immediate. */
- void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
- uint32_t oprsz, uint32_t maxsz, int64_t c,
- const GVecGen4i *g)
- {
- const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
- const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
- check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
- type = 0;
- if (g->fniv) {
- type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
- }
- switch (type) {
- case TCG_TYPE_V256:
- /*
- * Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, some,
- 32, TCG_TYPE_V256, c, g->fniv);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- bofs += some;
- cofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
- 16, TCG_TYPE_V128, c, g->fniv);
- break;
- case TCG_TYPE_V64:
- expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
- 8, TCG_TYPE_V64, c, g->fniv);
- break;
- case 0:
- if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_4i_i64(dofs, aofs, bofs, cofs, oprsz, c, g->fni8);
- } else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_4i_i32(dofs, aofs, bofs, cofs, oprsz, c, g->fni4);
- } else {
- assert(g->fno != NULL);
- tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
- oprsz, maxsz, c, g->fno);
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- /*
- * Expand specific vector operations.
- */
- static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
- {
- tcg_gen_mov_vec(a, b);
- }
- void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2 g = {
- .fni8 = tcg_gen_mov_i64,
- .fniv = vec_mov2,
- .fno = gen_helper_gvec_mov,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (dofs != aofs) {
- tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
- } else {
- check_size_align(oprsz, maxsz, dofs);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- }
- void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i32 in)
- {
- check_size_align(oprsz, maxsz, dofs);
- tcg_debug_assert(vece <= MO_32);
- do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
- }
- void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i64 in)
- {
- check_size_align(oprsz, maxsz, dofs);
- tcg_debug_assert(vece <= MO_64);
- do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
- }
- void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- check_size_align(oprsz, maxsz, dofs);
- if (vece <= MO_64) {
- TCGType type = choose_vector_type(NULL, vece, oprsz, 0);
- if (type != 0) {
- TCGv_vec t_vec = tcg_temp_new_vec(type);
- tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs);
- do_dup_store(type, dofs, oprsz, maxsz, t_vec);
- } else if (vece <= MO_32) {
- TCGv_i32 in = tcg_temp_ebb_new_i32();
- switch (vece) {
- case MO_8:
- tcg_gen_ld8u_i32(in, tcg_env, aofs);
- break;
- case MO_16:
- tcg_gen_ld16u_i32(in, tcg_env, aofs);
- break;
- default:
- tcg_gen_ld_i32(in, tcg_env, aofs);
- break;
- }
- do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
- tcg_temp_free_i32(in);
- } else {
- TCGv_i64 in = tcg_temp_ebb_new_i64();
- tcg_gen_ld_i64(in, tcg_env, aofs);
- do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
- tcg_temp_free_i64(in);
- }
- } else if (vece == 4) {
- /* 128-bit duplicate. */
- int i;
- tcg_debug_assert(oprsz >= 16);
- if (TCG_TARGET_HAS_v128) {
- TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);
- tcg_gen_ld_vec(in, tcg_env, aofs);
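- /* When aofs == dofs, the first 16 bytes already hold the value; skip them. */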
- for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
- tcg_gen_st_vec(in, tcg_env, dofs + i);
- }
- } else {
- TCGv_i64 in0 = tcg_temp_ebb_new_i64();
- TCGv_i64 in1 = tcg_temp_ebb_new_i64();
- tcg_gen_ld_i64(in0, tcg_env, aofs);
- tcg_gen_ld_i64(in1, tcg_env, aofs + 8);
- for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
- tcg_gen_st_i64(in0, tcg_env, dofs + i);
- tcg_gen_st_i64(in1, tcg_env, dofs + i + 8);
- }
- tcg_temp_free_i64(in0);
- tcg_temp_free_i64(in1);
- }
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- } else if (vece == 5) {
- /* 256-bit duplicate. */
- int i;
- tcg_debug_assert(oprsz >= 32);
- tcg_debug_assert(oprsz % 32 == 0);
- if (TCG_TARGET_HAS_v256) {
- TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);
- tcg_gen_ld_vec(in, tcg_env, aofs);
- for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
- tcg_gen_st_vec(in, tcg_env, dofs + i);
- }
- } else if (TCG_TARGET_HAS_v128) {
- TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
- TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);
- tcg_gen_ld_vec(in0, tcg_env, aofs);
- tcg_gen_ld_vec(in1, tcg_env, aofs + 16);
- for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
- tcg_gen_st_vec(in0, tcg_env, dofs + i);
- tcg_gen_st_vec(in1, tcg_env, dofs + i + 16);
- }
- } else {
- TCGv_i64 in[4];
- int j;
- for (j = 0; j < 4; ++j) {
- in[j] = tcg_temp_ebb_new_i64();
- tcg_gen_ld_i64(in[j], tcg_env, aofs + j * 8);
- }
- for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
- for (j = 0; j < 4; ++j) {
- tcg_gen_st_i64(in[j], tcg_env, dofs + i + j * 8);
- }
- }
- for (j = 0; j < 4; ++j) {
- tcg_temp_free_i64(in[j]);
- }
- }
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- } else {
- g_assert_not_reached();
- }
- }
- void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, uint64_t x)
- {
- check_size_align(oprsz, maxsz, dofs);
- do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
- }
- void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2 g = {
- .fni8 = tcg_gen_not_i64,
- .fniv = tcg_gen_not_vec,
- .fno = gen_helper_gvec_not,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
- }
- /* Perform a vector addition using normal addition and a mask. The mask
- should be the sign bit of each lane. This 6-operation form is more
- efficient than separate additions when there are 4 or more lanes in
- the 64-bit operation. */
- static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
- {
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- TCGv_i64 t2 = tcg_temp_ebb_new_i64();
- TCGv_i64 t3 = tcg_temp_ebb_new_i64();
- tcg_gen_andc_i64(t1, a, m);
- tcg_gen_andc_i64(t2, b, m);
- tcg_gen_xor_i64(t3, a, b);
- tcg_gen_add_i64(d, t1, t2);
- tcg_gen_and_i64(t3, t3, m);
- tcg_gen_xor_i64(d, d, t3);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
- }
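- /*
-  * Illustrative sketch (not part of the translator): the same masked-add
-  * trick applied to a host uint64_t holding eight 8-bit lanes.  Clearing
-  * the per-lane sign bit before the add keeps carries from crossing lane
-  * boundaries; the final xor restores each top bit from the carry-less
-  * sum a ^ b.
-  *
-  *   static uint64_t addv8_sketch(uint64_t a, uint64_t b)
-  *   {
-  *       const uint64_t m = 0x8080808080808080ull;  // sign bit per lane
-  *       uint64_t low = (a & ~m) + (b & ~m);        // low 7 bits per lane
-  *       return low ^ ((a ^ b) & m);                // recompute top bits
-  *   }
-  */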
- void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
- gen_addv_mask(d, a, b, m);
- }
- void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
- TCGv_i32 t1 = tcg_temp_ebb_new_i32();
- TCGv_i32 t2 = tcg_temp_ebb_new_i32();
- TCGv_i32 t3 = tcg_temp_ebb_new_i32();
- tcg_gen_andc_i32(t1, a, m);
- tcg_gen_andc_i32(t2, b, m);
- tcg_gen_xor_i32(t3, a, b);
- tcg_gen_add_i32(d, t1, t2);
- tcg_gen_and_i32(t3, t3, m);
- tcg_gen_xor_i32(d, d, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
- }
- void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
- gen_addv_mask(d, a, b, m);
- }
- void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t1 = tcg_temp_ebb_new_i32();
- TCGv_i32 t2 = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t1, a, ~0xffff);
- tcg_gen_add_i32(t2, a, b);
- tcg_gen_add_i32(t1, t1, b);
- tcg_gen_deposit_i32(d, t1, t2, 0, 16);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- }
- void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- TCGv_i64 t2 = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t1, a, ~0xffffffffull);
- tcg_gen_add_i64(t2, a, b);
- tcg_gen_add_i64(t1, t1, b);
- tcg_gen_deposit_i64(d, t1, t2, 0, 32);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- }
- static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
- void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g[4] = {
- { .fni8 = tcg_gen_vec_add8_i64,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_add8,
- .opt_opc = vecop_list_add,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_add16_i64,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_add16,
- .opt_opc = vecop_list_add,
- .vece = MO_16 },
- { .fni4 = tcg_gen_add_i32,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_add32,
- .opt_opc = vecop_list_add,
- .vece = MO_32 },
- { .fni8 = tcg_gen_add_i64,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_add64,
- .opt_opc = vecop_list_add,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
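- /*
-  * Usage sketch (offsets are hypothetical): a target front end emitting a
-  * 16-byte add of 32-bit elements would call
-  *
-  *   tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 16, 16);
-  *
-  * where dofs/aofs/bofs are byte offsets of the vector registers relative
-  * to tcg_env.
-  */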
- void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2s g[4] = {
- { .fni8 = tcg_gen_vec_add8_i64,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_adds8,
- .opt_opc = vecop_list_add,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_add16_i64,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_adds16,
- .opt_opc = vecop_list_add,
- .vece = MO_16 },
- { .fni4 = tcg_gen_add_i32,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_adds32,
- .opt_opc = vecop_list_add,
- .vece = MO_32 },
- { .fni8 = tcg_gen_add_i64,
- .fniv = tcg_gen_add_vec,
- .fno = gen_helper_gvec_adds64,
- .opt_opc = vecop_list_add,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
- }
- void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_constant_i64(c);
- tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
- }
- static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };
- void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2s g[4] = {
- { .fni8 = tcg_gen_vec_sub8_i64,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_subs8,
- .opt_opc = vecop_list_sub,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_sub16_i64,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_subs16,
- .opt_opc = vecop_list_sub,
- .vece = MO_16 },
- { .fni4 = tcg_gen_sub_i32,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_subs32,
- .opt_opc = vecop_list_sub,
- .vece = MO_32 },
- { .fni8 = tcg_gen_sub_i64,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_subs64,
- .opt_opc = vecop_list_sub,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
- }
- /* Perform a vector subtraction using normal subtraction and a mask.
- Compare gen_addv_mask above. */
- static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
- {
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- TCGv_i64 t2 = tcg_temp_ebb_new_i64();
- TCGv_i64 t3 = tcg_temp_ebb_new_i64();
- tcg_gen_or_i64(t1, a, m);
- tcg_gen_andc_i64(t2, b, m);
- tcg_gen_eqv_i64(t3, a, b);
- tcg_gen_sub_i64(d, t1, t2);
- tcg_gen_and_i64(t3, t3, m);
- tcg_gen_xor_i64(d, d, t3);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
- }
- void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
- gen_subv_mask(d, a, b, m);
- }
- void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
- TCGv_i32 t1 = tcg_temp_ebb_new_i32();
- TCGv_i32 t2 = tcg_temp_ebb_new_i32();
- TCGv_i32 t3 = tcg_temp_ebb_new_i32();
- tcg_gen_or_i32(t1, a, m);
- tcg_gen_andc_i32(t2, b, m);
- tcg_gen_eqv_i32(t3, a, b);
- tcg_gen_sub_i32(d, t1, t2);
- tcg_gen_and_i32(t3, t3, m);
- tcg_gen_xor_i32(d, d, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
- }
- void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
- gen_subv_mask(d, a, b, m);
- }
- void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t1 = tcg_temp_ebb_new_i32();
- TCGv_i32 t2 = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t1, b, ~0xffff);
- tcg_gen_sub_i32(t2, a, b);
- tcg_gen_sub_i32(t1, a, t1);
- tcg_gen_deposit_i32(d, t1, t2, 0, 16);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- }
- void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- TCGv_i64 t2 = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t1, b, ~0xffffffffull);
- tcg_gen_sub_i64(t2, a, b);
- tcg_gen_sub_i64(t1, a, t1);
- tcg_gen_deposit_i64(d, t1, t2, 0, 32);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- }
- void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g[4] = {
- { .fni8 = tcg_gen_vec_sub8_i64,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_sub8,
- .opt_opc = vecop_list_sub,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_sub16_i64,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_sub16,
- .opt_opc = vecop_list_sub,
- .vece = MO_16 },
- { .fni4 = tcg_gen_sub_i32,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_sub32,
- .opt_opc = vecop_list_sub,
- .vece = MO_32 },
- { .fni8 = tcg_gen_sub_i64,
- .fniv = tcg_gen_sub_vec,
- .fno = gen_helper_gvec_sub64,
- .opt_opc = vecop_list_sub,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
- void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_mul8,
- .opt_opc = vecop_list_mul,
- .vece = MO_8 },
- { .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_mul16,
- .opt_opc = vecop_list_mul,
- .vece = MO_16 },
- { .fni4 = tcg_gen_mul_i32,
- .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_mul32,
- .opt_opc = vecop_list_mul,
- .vece = MO_32 },
- { .fni8 = tcg_gen_mul_i64,
- .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_mul64,
- .opt_opc = vecop_list_mul,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2s g[4] = {
- { .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_muls8,
- .opt_opc = vecop_list_mul,
- .vece = MO_8 },
- { .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_muls16,
- .opt_opc = vecop_list_mul,
- .vece = MO_16 },
- { .fni4 = tcg_gen_mul_i32,
- .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_muls32,
- .opt_opc = vecop_list_mul,
- .vece = MO_32 },
- { .fni8 = tcg_gen_mul_i64,
- .fniv = tcg_gen_mul_vec,
- .fno = gen_helper_gvec_muls64,
- .opt_opc = vecop_list_mul,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
- }
- void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_constant_i64(c);
- tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
- }
- void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_ssadd_vec,
- .fno = gen_helper_gvec_ssadd8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_ssadd_vec,
- .fno = gen_helper_gvec_ssadd16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fniv = tcg_gen_ssadd_vec,
- .fno = gen_helper_gvec_ssadd32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fniv = tcg_gen_ssadd_vec,
- .fno = gen_helper_gvec_ssadd64,
- .opt_opc = vecop_list,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_sssub_vec,
- .fno = gen_helper_gvec_sssub8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_sssub_vec,
- .fno = gen_helper_gvec_sssub16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fniv = tcg_gen_sssub_vec,
- .fno = gen_helper_gvec_sssub32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fniv = tcg_gen_sssub_vec,
- .fno = gen_helper_gvec_sssub64,
- .opt_opc = vecop_list,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
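- /* Unsigned saturating add: if the sum wrapped around (d < a), clamp to all ones. */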
- static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 max = tcg_constant_i32(-1);
- tcg_gen_add_i32(d, a, b);
- tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
- }
- static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 max = tcg_constant_i64(-1);
- tcg_gen_add_i64(d, a, b);
- tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
- }
- void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_usadd_vec,
- .fno = gen_helper_gvec_usadd8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_usadd_vec,
- .fno = gen_helper_gvec_usadd16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_usadd_i32,
- .fniv = tcg_gen_usadd_vec,
- .fno = gen_helper_gvec_usadd32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_usadd_i64,
- .fniv = tcg_gen_usadd_vec,
- .fno = gen_helper_gvec_usadd64,
- .opt_opc = vecop_list,
- .vece = MO_64 }
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
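- /* Unsigned saturating subtract: if a < b, clamp the result to zero. */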
- static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 min = tcg_constant_i32(0);
- tcg_gen_sub_i32(d, a, b);
- tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
- }
- static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 min = tcg_constant_i64(0);
- tcg_gen_sub_i64(d, a, b);
- tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
- }
- void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_ussub_vec,
- .fno = gen_helper_gvec_ussub8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_ussub_vec,
- .fno = gen_helper_gvec_ussub16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_ussub_i32,
- .fniv = tcg_gen_ussub_vec,
- .fno = gen_helper_gvec_ussub32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_ussub_i64,
- .fniv = tcg_gen_ussub_vec,
- .fno = gen_helper_gvec_ussub64,
- .opt_opc = vecop_list,
- .vece = MO_64 }
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_smin_vec,
- .fno = gen_helper_gvec_smin8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_smin_vec,
- .fno = gen_helper_gvec_smin16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_smin_i32,
- .fniv = tcg_gen_smin_vec,
- .fno = gen_helper_gvec_smin32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_smin_i64,
- .fniv = tcg_gen_smin_vec,
- .fno = gen_helper_gvec_smin64,
- .opt_opc = vecop_list,
- .vece = MO_64 }
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_umin_vec,
- .fno = gen_helper_gvec_umin8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_umin_vec,
- .fno = gen_helper_gvec_umin16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_umin_i32,
- .fniv = tcg_gen_umin_vec,
- .fno = gen_helper_gvec_umin32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_umin_i64,
- .fniv = tcg_gen_umin_vec,
- .fno = gen_helper_gvec_umin64,
- .opt_opc = vecop_list,
- .vece = MO_64 }
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_smax_vec,
- .fno = gen_helper_gvec_smax8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_smax_vec,
- .fno = gen_helper_gvec_smax16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_smax_i32,
- .fniv = tcg_gen_smax_vec,
- .fno = gen_helper_gvec_smax32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_smax_i64,
- .fniv = tcg_gen_smax_vec,
- .fno = gen_helper_gvec_smax64,
- .opt_opc = vecop_list,
- .vece = MO_64 }
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_umax_vec,
- .fno = gen_helper_gvec_umax8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_umax_vec,
- .fno = gen_helper_gvec_umax16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_umax_i32,
- .fniv = tcg_gen_umax_vec,
- .fno = gen_helper_gvec_umax32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_umax_i64,
- .fniv = tcg_gen_umax_vec,
- .fno = gen_helper_gvec_umax64,
- .opt_opc = vecop_list,
- .vece = MO_64 }
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- /* Perform a vector negation using normal negation and a mask.
- Compare gen_subv_mask above. */
- static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
- {
- TCGv_i64 t2 = tcg_temp_ebb_new_i64();
- TCGv_i64 t3 = tcg_temp_ebb_new_i64();
- tcg_gen_andc_i64(t3, m, b);
- tcg_gen_andc_i64(t2, b, m);
- tcg_gen_sub_i64(d, m, t2);
- tcg_gen_xor_i64(d, d, t3);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
- }
- void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
- {
- TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
- gen_negv_mask(d, b, m);
- }
- void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
- {
- TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
- gen_negv_mask(d, b, m);
- }
- void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
- {
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- TCGv_i64 t2 = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t1, b, ~0xffffffffull);
- tcg_gen_neg_i64(t2, b);
- tcg_gen_neg_i64(t1, t1);
- tcg_gen_deposit_i64(d, t1, t2, 0, 32);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- }
- void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
- static const GVecGen2 g[4] = {
- { .fni8 = tcg_gen_vec_neg8_i64,
- .fniv = tcg_gen_neg_vec,
- .fno = gen_helper_gvec_neg8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_neg16_i64,
- .fniv = tcg_gen_neg_vec,
- .fno = gen_helper_gvec_neg16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_neg_i32,
- .fniv = tcg_gen_neg_vec,
- .fno = gen_helper_gvec_neg32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_neg_i64,
- .fniv = tcg_gen_neg_vec,
- .fno = gen_helper_gvec_neg64,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
- }
- static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- int nbit = 8 << vece;
- /* Create -1 for each negative element. */
- tcg_gen_shri_i64(t, b, nbit - 1);
- tcg_gen_andi_i64(t, t, dup_const(vece, 1));
- tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
- /*
- * Invert (via xor -1) and add one.
- * Because the invert has already cleared the msb of each affected
- * element, the increment can never carry into the next element.
- */
- tcg_gen_xor_i64(d, b, t);
- tcg_gen_andi_i64(t, t, dup_const(vece, 1));
- tcg_gen_add_i64(d, d, t);
- tcg_temp_free_i64(t);
- }
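- /*
-  * Illustrative walk-through for MO_8 with b = 0xfe (-2): t = (b >> 7) & 1
-  * = 1, then t * 0xff = 0xff; d = b ^ 0xff = 0x01, plus (t & 1) = 1 gives
-  * 0x02 = abs(-2).  Non-negative lanes get t = 0 and pass through unchanged.
-  */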
- static void tcg_gen_vec_abs8_i64(TCGv_i64 d, TCGv_i64 b)
- {
- gen_absv_mask(d, b, MO_8);
- }
- static void tcg_gen_vec_abs16_i64(TCGv_i64 d, TCGv_i64 b)
- {
- gen_absv_mask(d, b, MO_16);
- }
- void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 };
- static const GVecGen2 g[4] = {
- { .fni8 = tcg_gen_vec_abs8_i64,
- .fniv = tcg_gen_abs_vec,
- .fno = gen_helper_gvec_abs8,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_abs16_i64,
- .fniv = tcg_gen_abs_vec,
- .fno = gen_helper_gvec_abs16,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_abs_i32,
- .fniv = tcg_gen_abs_vec,
- .fno = gen_helper_gvec_abs32,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_abs_i64,
- .fniv = tcg_gen_abs_vec,
- .fno = gen_helper_gvec_abs64,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
- }
- void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_and_i64,
- .fniv = tcg_gen_and_vec,
- .fno = gen_helper_gvec_and,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_or_i64,
- .fniv = tcg_gen_or_vec,
- .fno = gen_helper_gvec_or,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_xor_i64,
- .fniv = tcg_gen_xor_vec,
- .fno = gen_helper_gvec_xor,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
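- /* x ^ x == 0 for every element, so just zero the destination. */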
- tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_andc_i64,
- .fniv = tcg_gen_andc_vec,
- .fno = gen_helper_gvec_andc,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_orc_i64,
- .fniv = tcg_gen_orc_vec,
- .fno = gen_helper_gvec_orc,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_nand_i64,
- .fniv = tcg_gen_nand_vec,
- .fno = gen_helper_gvec_nand,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_nor_i64,
- .fniv = tcg_gen_nor_vec,
- .fno = gen_helper_gvec_nor,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen3 g = {
- .fni8 = tcg_gen_eqv_i64,
- .fniv = tcg_gen_eqv_vec,
- .fno = gen_helper_gvec_eqv,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- };
- if (aofs == bofs) {
- tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
- } else {
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
- }
- }
- static const GVecGen2s gop_ands = {
- .fni8 = tcg_gen_and_i64,
- .fniv = tcg_gen_and_vec,
- .fno = gen_helper_gvec_ands,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64
- };
- void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_temp_ebb_new_i64();
- tcg_gen_dup_i64(vece, tmp, c);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
- tcg_temp_free_i64(tmp);
- }
- void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
- }
- void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- static GVecGen2s g = {
- .fni8 = tcg_gen_andc_i64,
- .fniv = tcg_gen_andc_vec,
- .fno = gen_helper_gvec_andcs,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64
- };
- TCGv_i64 tmp = tcg_temp_ebb_new_i64();
- tcg_gen_dup_i64(vece, tmp, c);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
- tcg_temp_free_i64(tmp);
- }
- static const GVecGen2s gop_xors = {
- .fni8 = tcg_gen_xor_i64,
- .fniv = tcg_gen_xor_vec,
- .fno = gen_helper_gvec_xors,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64
- };
- void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_temp_ebb_new_i64();
- tcg_gen_dup_i64(vece, tmp, c);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
- tcg_temp_free_i64(tmp);
- }
- void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
- }
- static const GVecGen2s gop_ors = {
- .fni8 = tcg_gen_or_i64,
- .fniv = tcg_gen_or_vec,
- .fno = gen_helper_gvec_ors,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64
- };
- void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_temp_ebb_new_i64();
- tcg_gen_dup_i64(vece, tmp, c);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
- tcg_temp_free_i64(tmp);
- }
- void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
- }
- void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t mask = dup_const(MO_8, 0xff << c);
- tcg_gen_shli_i64(d, a, c);
- tcg_gen_andi_i64(d, d, mask);
- }
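- /*
-  * For example, with c == 3 the mask is dup_const(MO_8, 0xf8): the plain
-  * 64-bit shift moves the top 3 bits of each byte into the next byte up,
-  * and the mask then clears exactly those stray low bits again.
-  */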
- void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t mask = dup_const(MO_16, 0xffff << c);
- tcg_gen_shli_i64(d, a, c);
- tcg_gen_andi_i64(d, d, mask);
- }
- void tcg_gen_vec_shl8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
- {
- uint32_t mask = dup_const(MO_8, 0xff << c);
- tcg_gen_shli_i32(d, a, c);
- tcg_gen_andi_i32(d, d, mask);
- }
- void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
- {
- uint32_t mask = dup_const(MO_16, 0xffff << c);
- tcg_gen_shli_i32(d, a, c);
- tcg_gen_andi_i32(d, d, mask);
- }
- void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
- static const GVecGen2i g[4] = {
- { .fni8 = tcg_gen_vec_shl8i_i64,
- .fniv = tcg_gen_shli_vec,
- .fno = gen_helper_gvec_shl8i,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_shl16i_i64,
- .fniv = tcg_gen_shli_vec,
- .fno = gen_helper_gvec_shl16i,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_shli_i32,
- .fniv = tcg_gen_shli_vec,
- .fno = gen_helper_gvec_shl32i,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_shli_i64,
- .fniv = tcg_gen_shli_vec,
- .fno = gen_helper_gvec_shl64i,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
- if (shift == 0) {
- tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
- }
- }
- void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t mask = dup_const(MO_8, 0xff >> c);
- tcg_gen_shri_i64(d, a, c);
- tcg_gen_andi_i64(d, d, mask);
- }
- void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t mask = dup_const(MO_16, 0xffff >> c);
- tcg_gen_shri_i64(d, a, c);
- tcg_gen_andi_i64(d, d, mask);
- }
- void tcg_gen_vec_shr8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
- {
- uint32_t mask = dup_const(MO_8, 0xff >> c);
- tcg_gen_shri_i32(d, a, c);
- tcg_gen_andi_i32(d, d, mask);
- }
- void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
- {
- uint32_t mask = dup_const(MO_16, 0xffff >> c);
- tcg_gen_shri_i32(d, a, c);
- tcg_gen_andi_i32(d, d, mask);
- }
- void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
- static const GVecGen2i g[4] = {
- { .fni8 = tcg_gen_vec_shr8i_i64,
- .fniv = tcg_gen_shri_vec,
- .fno = gen_helper_gvec_shr8i,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_shr16i_i64,
- .fniv = tcg_gen_shri_vec,
- .fno = gen_helper_gvec_shr16i,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_shri_i32,
- .fniv = tcg_gen_shri_vec,
- .fno = gen_helper_gvec_shr32i,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_shri_i64,
- .fniv = tcg_gen_shri_vec,
- .fno = gen_helper_gvec_shr64i,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
- if (shift == 0) {
- tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
- }
- }
- void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
- uint64_t c_mask = dup_const(MO_8, 0xff >> c);
- TCGv_i64 s = tcg_temp_ebb_new_i64();
- tcg_gen_shri_i64(d, a, c);
- tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
- tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
- tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
- tcg_gen_or_i64(d, d, s); /* include sign extension */
- tcg_temp_free_i64(s);
- }
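- /*
-  * For example, with c == 2: s_mask is 0x20 per byte (the shifted sign bit),
-  * c_mask is 0x3f, and the multiply by (2 << 2) - 2 == 6 turns an isolated
-  * 0x20 into 0xc0, filling exactly the two bit positions above the shifted
-  * sign, i.e. the per-byte sign extension.
-  */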
- void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
- uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
- TCGv_i64 s = tcg_temp_ebb_new_i64();
- tcg_gen_shri_i64(d, a, c);
- tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
- tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
- tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
- tcg_gen_or_i64(d, d, s); /* include sign extension */
- tcg_temp_free_i64(s);
- }
- void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
- {
- uint32_t s_mask = dup_const(MO_8, 0x80 >> c);
- uint32_t c_mask = dup_const(MO_8, 0xff >> c);
- TCGv_i32 s = tcg_temp_ebb_new_i32();
- tcg_gen_shri_i32(d, a, c);
- tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
- tcg_gen_muli_i32(s, s, (2 << c) - 2); /* replicate isolated signs */
- tcg_gen_andi_i32(d, d, c_mask); /* clear out bits above sign */
- tcg_gen_or_i32(d, d, s); /* include sign extension */
- tcg_temp_free_i32(s);
- }
- void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
- {
- uint32_t s_mask = dup_const(MO_16, 0x8000 >> c);
- uint32_t c_mask = dup_const(MO_16, 0xffff >> c);
- TCGv_i32 s = tcg_temp_ebb_new_i32();
- tcg_gen_shri_i32(d, a, c);
- tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
- tcg_gen_andi_i32(d, d, c_mask); /* clear out bits above sign */
- tcg_gen_muli_i32(s, s, (2 << c) - 2); /* replicate isolated signs */
- tcg_gen_or_i32(d, d, s); /* include sign extension */
- tcg_temp_free_i32(s);
- }
- void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
- static const GVecGen2i g[4] = {
- { .fni8 = tcg_gen_vec_sar8i_i64,
- .fniv = tcg_gen_sari_vec,
- .fno = gen_helper_gvec_sar8i,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_sar16i_i64,
- .fniv = tcg_gen_sari_vec,
- .fno = gen_helper_gvec_sar16i,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_sari_i32,
- .fniv = tcg_gen_sari_vec,
- .fno = gen_helper_gvec_sar32i,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_sari_i64,
- .fniv = tcg_gen_sari_vec,
- .fno = gen_helper_gvec_sar64i,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
- if (shift == 0) {
- tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
- }
- }
- void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t mask = dup_const(MO_8, 0xff << c);
- tcg_gen_shli_i64(d, a, c);
- tcg_gen_shri_i64(a, a, 8 - c);
- tcg_gen_andi_i64(d, d, mask);
- tcg_gen_andi_i64(a, a, ~mask);
- tcg_gen_or_i64(d, d, a);
- }
- void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
- {
- uint64_t mask = dup_const(MO_16, 0xffff << c);
- tcg_gen_shli_i64(d, a, c);
- tcg_gen_shri_i64(a, a, 16 - c);
- tcg_gen_andi_i64(d, d, mask);
- tcg_gen_andi_i64(a, a, ~mask);
- tcg_gen_or_i64(d, d, a);
- }
- void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
- static const GVecGen2i g[4] = {
- { .fni8 = tcg_gen_vec_rotl8i_i64,
- .fniv = tcg_gen_rotli_vec,
- .fno = gen_helper_gvec_rotl8i,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fni8 = tcg_gen_vec_rotl16i_i64,
- .fniv = tcg_gen_rotli_vec,
- .fno = gen_helper_gvec_rotl16i,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_rotli_i32,
- .fniv = tcg_gen_rotli_vec,
- .fno = gen_helper_gvec_rotl32i,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_rotli_i64,
- .fniv = tcg_gen_rotli_vec,
- .fno = gen_helper_gvec_rotl64i,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
- if (shift == 0) {
- tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
- } else {
- tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
- }
- }
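- /*
-  * A right rotate is expanded as a left rotate by the negated amount;
-  * e.g. for MO_8, a right rotate by 3 becomes a left rotate by
-  * (-3 & 7) == 5.
-  */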
- void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz)
- {
- tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
- tcg_gen_gvec_rotli(vece, dofs, aofs, -shift & ((8 << vece) - 1),
- oprsz, maxsz);
- }
- /*
- * Specialized generation of vector shifts by a non-constant scalar.
- */
- typedef struct {
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
- void (*fniv_s)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32);
- void (*fniv_v)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
- gen_helper_gvec_2 *fno[4];
- TCGOpcode s_list[2];
- TCGOpcode v_list[2];
- } GVecGen2sh;
- static void expand_2sh_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t tysz, TCGType type,
- TCGv_i32 shift,
- void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32))
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- fni(vece, t1, t0, shift);
- tcg_gen_st_vec(t1, tcg_env, dofs + i);
- }
- }
- static void
- do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2sh *g)
- {
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
- /* If the backend has a scalar expansion, great. */
- type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
- if (type) {
- const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
- switch (type) {
- case TCG_TYPE_V256:
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2sh_vec(vece, dofs, aofs, some, 32,
- TCG_TYPE_V256, shift, g->fniv_s);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_2sh_vec(vece, dofs, aofs, oprsz, 16,
- TCG_TYPE_V128, shift, g->fniv_s);
- break;
- case TCG_TYPE_V64:
- expand_2sh_vec(vece, dofs, aofs, oprsz, 8,
- TCG_TYPE_V64, shift, g->fniv_s);
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- goto clear_tail;
- }
- /* If the backend supports variable vector shifts, also cool. */
- type = choose_vector_type(g->v_list, vece, oprsz, vece == MO_64);
- if (type) {
- const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
- TCGv_vec v_shift = tcg_temp_new_vec(type);
- if (vece == MO_64) {
- TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
- tcg_gen_extu_i32_i64(sh64, shift);
- tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
- tcg_temp_free_i64(sh64);
- } else {
- tcg_gen_dup_i32_vec(vece, v_shift, shift);
- }
- switch (type) {
- case TCG_TYPE_V256:
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2s_vec(vece, dofs, aofs, some, 32, TCG_TYPE_V256,
- v_shift, false, g->fniv_v);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_2s_vec(vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
- v_shift, false, g->fniv_v);
- break;
- case TCG_TYPE_V64:
- expand_2s_vec(vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
- v_shift, false, g->fniv_v);
- break;
- default:
- g_assert_not_reached();
- }
- tcg_temp_free_vec(v_shift);
- tcg_swap_vecop_list(hold_list);
- goto clear_tail;
- }
- /* Otherwise fall back to integral... */
- if (vece == MO_32 && check_size_impl(oprsz, 4)) {
- expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
- } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
- TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
- tcg_gen_extu_i32_i64(sh64, shift);
- expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
- tcg_temp_free_i64(sh64);
- } else {
- TCGv_ptr a0 = tcg_temp_ebb_new_ptr();
- TCGv_ptr a1 = tcg_temp_ebb_new_ptr();
- TCGv_i32 desc = tcg_temp_ebb_new_i32();
- tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
- tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
- tcg_gen_addi_ptr(a0, tcg_env, dofs);
- tcg_gen_addi_ptr(a1, tcg_env, aofs);
- g->fno[vece](a0, a1, desc);
- tcg_temp_free_ptr(a0);
- tcg_temp_free_ptr(a1);
- tcg_temp_free_i32(desc);
- return;
- }
- clear_tail:
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
- void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2sh g = {
- .fni4 = tcg_gen_shl_i32,
- .fni8 = tcg_gen_shl_i64,
- .fniv_s = tcg_gen_shls_vec,
- .fniv_v = tcg_gen_shlv_vec,
- .fno = {
- gen_helper_gvec_shl8i,
- gen_helper_gvec_shl16i,
- gen_helper_gvec_shl32i,
- gen_helper_gvec_shl64i,
- },
- .s_list = { INDEX_op_shls_vec, 0 },
- .v_list = { INDEX_op_shlv_vec, 0 },
- };
- tcg_debug_assert(vece <= MO_64);
- do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
- }
- void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2sh g = {
- .fni4 = tcg_gen_shr_i32,
- .fni8 = tcg_gen_shr_i64,
- .fniv_s = tcg_gen_shrs_vec,
- .fniv_v = tcg_gen_shrv_vec,
- .fno = {
- gen_helper_gvec_shr8i,
- gen_helper_gvec_shr16i,
- gen_helper_gvec_shr32i,
- gen_helper_gvec_shr64i,
- },
- .s_list = { INDEX_op_shrs_vec, 0 },
- .v_list = { INDEX_op_shrv_vec, 0 },
- };
- tcg_debug_assert(vece <= MO_64);
- do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
- }
- void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2sh g = {
- .fni4 = tcg_gen_sar_i32,
- .fni8 = tcg_gen_sar_i64,
- .fniv_s = tcg_gen_sars_vec,
- .fniv_v = tcg_gen_sarv_vec,
- .fno = {
- gen_helper_gvec_sar8i,
- gen_helper_gvec_sar16i,
- gen_helper_gvec_sar32i,
- gen_helper_gvec_sar64i,
- },
- .s_list = { INDEX_op_sars_vec, 0 },
- .v_list = { INDEX_op_sarv_vec, 0 },
- };
- tcg_debug_assert(vece <= MO_64);
- do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
- }
- void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen2sh g = {
- .fni4 = tcg_gen_rotl_i32,
- .fni8 = tcg_gen_rotl_i64,
- .fniv_s = tcg_gen_rotls_vec,
- .fniv_v = tcg_gen_rotlv_vec,
- .fno = {
- gen_helper_gvec_rotl8i,
- gen_helper_gvec_rotl16i,
- gen_helper_gvec_rotl32i,
- gen_helper_gvec_rotl64i,
- },
- .s_list = { INDEX_op_rotls_vec, 0 },
- .v_list = { INDEX_op_rotlv_vec, 0 },
- };
- tcg_debug_assert(vece <= MO_64);
- do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
- }
- void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i32 tmp = tcg_temp_ebb_new_i32();
- tcg_gen_neg_i32(tmp, shift);
- tcg_gen_andi_i32(tmp, tmp, (8 << vece) - 1);
- tcg_gen_gvec_rotls(vece, dofs, aofs, tmp, oprsz, maxsz);
- tcg_temp_free_i32(tmp);
- }
- /*
- * Expand D = A << (B % element bits)
- *
- * Unlike scalar shifts, it is not easy for the target front end to
- * include the modulo as part of the expansion.  If the target naturally
- * includes the modulo as part of the operation, great!  If the target
- * has some other behaviour for out-of-range shifts, then it could not
- * use this function anyway, and would need to do its own expansion
- * with custom functions.
- */
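- /*
-  * For example, at MO_8 a per-lane shift count of 0x09 is reduced to
-  * 0x09 & 7 == 1 before the shift is applied.
-  */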
- static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
- TCGv_vec a, TCGv_vec b)
- {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
- tcg_gen_and_vec(vece, t, b, m);
- tcg_gen_shlv_vec(vece, d, a, t);
- tcg_temp_free_vec(t);
- }
- static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t, b, 31);
- tcg_gen_shl_i32(d, a, t);
- tcg_temp_free_i32(t);
- }
- static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t, b, 63);
- tcg_gen_shl_i64(d, a, t);
- tcg_temp_free_i64(t);
- }
- void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_shlv_mod_vec,
- .fno = gen_helper_gvec_shl8v,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_shlv_mod_vec,
- .fno = gen_helper_gvec_shl16v,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_shl_mod_i32,
- .fniv = tcg_gen_shlv_mod_vec,
- .fno = gen_helper_gvec_shl32v,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_shl_mod_i64,
- .fniv = tcg_gen_shlv_mod_vec,
- .fno = gen_helper_gvec_shl64v,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- /*
- * Similarly for logical right shifts.
- */
- static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
- TCGv_vec a, TCGv_vec b)
- {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
- tcg_gen_and_vec(vece, t, b, m);
- tcg_gen_shrv_vec(vece, d, a, t);
- tcg_temp_free_vec(t);
- }
- static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t, b, 31);
- tcg_gen_shr_i32(d, a, t);
- tcg_temp_free_i32(t);
- }
- static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t, b, 63);
- tcg_gen_shr_i64(d, a, t);
- tcg_temp_free_i64(t);
- }
- void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_shrv_mod_vec,
- .fno = gen_helper_gvec_shr8v,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_shrv_mod_vec,
- .fno = gen_helper_gvec_shr16v,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_shr_mod_i32,
- .fniv = tcg_gen_shrv_mod_vec,
- .fno = gen_helper_gvec_shr32v,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_shr_mod_i64,
- .fniv = tcg_gen_shrv_mod_vec,
- .fno = gen_helper_gvec_shr64v,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- /*
- * Similarly for arithmetic right shifts.
- */
- static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
- TCGv_vec a, TCGv_vec b)
- {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
- tcg_gen_and_vec(vece, t, b, m);
- tcg_gen_sarv_vec(vece, d, a, t);
- tcg_temp_free_vec(t);
- }
- static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t, b, 31);
- tcg_gen_sar_i32(d, a, t);
- tcg_temp_free_i32(t);
- }
- static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t, b, 63);
- tcg_gen_sar_i64(d, a, t);
- tcg_temp_free_i64(t);
- }
- void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_sarv_mod_vec,
- .fno = gen_helper_gvec_sar8v,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_sarv_mod_vec,
- .fno = gen_helper_gvec_sar16v,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_sar_mod_i32,
- .fniv = tcg_gen_sarv_mod_vec,
- .fno = gen_helper_gvec_sar32v,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_sar_mod_i64,
- .fniv = tcg_gen_sarv_mod_vec,
- .fno = gen_helper_gvec_sar64v,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- /*
- * Similarly for rotates.
- */
- static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
- TCGv_vec a, TCGv_vec b)
- {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
- tcg_gen_and_vec(vece, t, b, m);
- tcg_gen_rotlv_vec(vece, d, a, t);
- tcg_temp_free_vec(t);
- }
- static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t, b, 31);
- tcg_gen_rotl_i32(d, a, t);
- tcg_temp_free_i32(t);
- }
- static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t, b, 63);
- tcg_gen_rotl_i64(d, a, t);
- tcg_temp_free_i64(t);
- }
- void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_rotlv_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_rotlv_mod_vec,
- .fno = gen_helper_gvec_rotl8v,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_rotlv_mod_vec,
- .fno = gen_helper_gvec_rotl16v,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_rotl_mod_i32,
- .fniv = tcg_gen_rotlv_mod_vec,
- .fno = gen_helper_gvec_rotl32v,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_rotl_mod_i64,
- .fniv = tcg_gen_rotlv_mod_vec,
- .fno = gen_helper_gvec_rotl64v,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
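- /*
- * Similarly for right rotates.
- */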
- static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
- TCGv_vec a, TCGv_vec b)
- {
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
- tcg_gen_and_vec(vece, t, b, m);
- tcg_gen_rotrv_vec(vece, d, a, t);
- tcg_temp_free_vec(t);
- }
- static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
- {
- TCGv_i32 t = tcg_temp_ebb_new_i32();
- tcg_gen_andi_i32(t, b, 31);
- tcg_gen_rotr_i32(d, a, t);
- tcg_temp_free_i32(t);
- }
- static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_andi_i64(t, b, 63);
- tcg_gen_rotr_i64(d, a, t);
- tcg_temp_free_i64(t);
- }
- void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode vecop_list[] = { INDEX_op_rotrv_vec, 0 };
- static const GVecGen3 g[4] = {
- { .fniv = tcg_gen_rotrv_mod_vec,
- .fno = gen_helper_gvec_rotr8v,
- .opt_opc = vecop_list,
- .vece = MO_8 },
- { .fniv = tcg_gen_rotrv_mod_vec,
- .fno = gen_helper_gvec_rotr16v,
- .opt_opc = vecop_list,
- .vece = MO_16 },
- { .fni4 = tcg_gen_rotr_mod_i32,
- .fniv = tcg_gen_rotrv_mod_vec,
- .fno = gen_helper_gvec_rotr32v,
- .opt_opc = vecop_list,
- .vece = MO_32 },
- { .fni8 = tcg_gen_rotr_mod_i64,
- .fniv = tcg_gen_rotrv_mod_vec,
- .fno = gen_helper_gvec_rotr64v,
- .opt_opc = vecop_list,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- tcg_debug_assert(vece <= MO_64);
- tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
- }
- /* Expand OPSZ bytes worth of three-operand comparisons using i32 elements. */
- static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, TCGCond cond)
- {
- TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- TCGv_i32 t1 = tcg_temp_ebb_new_i32();
- uint32_t i;
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- tcg_gen_ld_i32(t1, tcg_env, bofs + i);
- tcg_gen_negsetcond_i32(cond, t0, t0, t1);
- tcg_gen_st_i32(t0, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t0);
- }
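- /* Likewise, using i64 elements. */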
- static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, TCGCond cond)
- {
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- tcg_gen_ld_i64(t1, tcg_env, bofs + i);
- tcg_gen_negsetcond_i64(cond, t0, t0, t1);
- tcg_gen_st_i64(t0, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
- }
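- /* Likewise, using host vectors of TYSZ bytes per operation. */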
- static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t tysz,
- TCGType type, TCGCond cond)
- {
- for (uint32_t i = 0; i < oprsz; i += tysz) {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- TCGv_vec t2 = tcg_temp_new_vec(type);
- tcg_gen_ld_vec(t0, tcg_env, aofs + i);
- tcg_gen_ld_vec(t1, tcg_env, bofs + i);
- tcg_gen_cmp_vec(cond, vece, t2, t0, t1);
- tcg_gen_st_vec(t2, tcg_env, dofs + i);
- }
- }
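- /*
- * Expand a comparison of two vectors element by element, setting each
- * destination element to all ones when the condition holds and to zero
- * when it does not.
- */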
- void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
- uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
- static gen_helper_gvec_3 * const eq_fn[4] = {
- gen_helper_gvec_eq8, gen_helper_gvec_eq16,
- gen_helper_gvec_eq32, gen_helper_gvec_eq64
- };
- static gen_helper_gvec_3 * const ne_fn[4] = {
- gen_helper_gvec_ne8, gen_helper_gvec_ne16,
- gen_helper_gvec_ne32, gen_helper_gvec_ne64
- };
- static gen_helper_gvec_3 * const lt_fn[4] = {
- gen_helper_gvec_lt8, gen_helper_gvec_lt16,
- gen_helper_gvec_lt32, gen_helper_gvec_lt64
- };
- static gen_helper_gvec_3 * const le_fn[4] = {
- gen_helper_gvec_le8, gen_helper_gvec_le16,
- gen_helper_gvec_le32, gen_helper_gvec_le64
- };
- static gen_helper_gvec_3 * const ltu_fn[4] = {
- gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
- gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
- };
- static gen_helper_gvec_3 * const leu_fn[4] = {
- gen_helper_gvec_leu8, gen_helper_gvec_leu16,
- gen_helper_gvec_leu32, gen_helper_gvec_leu64
- };
- static gen_helper_gvec_3 * const * const fns[16] = {
- [TCG_COND_EQ] = eq_fn,
- [TCG_COND_NE] = ne_fn,
- [TCG_COND_LT] = lt_fn,
- [TCG_COND_LE] = le_fn,
- [TCG_COND_LTU] = ltu_fn,
- [TCG_COND_LEU] = leu_fn,
- };
- const TCGOpcode *hold_list;
- TCGType type;
- uint32_t some;
- check_size_align(oprsz, maxsz, dofs | aofs | bofs);
- check_overlap_3(dofs, aofs, bofs, maxsz);
- if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
- do_dup(MO_8, dofs, oprsz, maxsz,
- NULL, NULL, -(cond == TCG_COND_ALWAYS));
- return;
- }
- /*
- * Implement inline with a vector type, if possible.
- * Prefer integer when 64-bit host and 64-bit comparison.
- */
- hold_list = tcg_swap_vecop_list(cmp_list);
- type = choose_vector_type(cmp_list, vece, oprsz,
- TCG_TARGET_REG_BITS == 64 && vece == MO_64);
- switch (type) {
- case TCG_TYPE_V256:
- /* Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- */
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
- if (some == oprsz) {
- break;
- }
- dofs += some;
- aofs += some;
- bofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
- break;
- case TCG_TYPE_V64:
- expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
- break;
- case 0:
- if (vece == MO_64 && check_size_impl(oprsz, 8)) {
- expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
- } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
- expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
- } else {
- gen_helper_gvec_3 * const *fn = fns[cond];
- if (fn == NULL) {
- uint32_t tmp;
- tmp = aofs, aofs = bofs, bofs = tmp;
- cond = tcg_swap_cond(cond);
- fn = fns[cond];
- assert(fn != NULL);
- }
- tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
- oprsz = maxsz;
- }
- break;
- default:
- g_assert_not_reached();
- }
- tcg_swap_vecop_list(hold_list);
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
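- /* As above, but with the second operand broadcast from a scalar in C. */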
- static void expand_cmps_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t tysz, TCGType type,
- TCGCond cond, TCGv_vec c)
- {
- TCGv_vec t0 = tcg_temp_new_vec(type);
- TCGv_vec t1 = tcg_temp_new_vec(type);
- uint32_t i;
- for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t1, tcg_env, aofs + i);
- tcg_gen_cmp_vec(cond, vece, t0, t1, c);
- tcg_gen_st_vec(t0, tcg_env, dofs + i);
- }
- }
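- /*
- * Expand a comparison of each element of A against the scalar C,
- * producing all ones for true and zero for false.
- */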
- void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
- uint32_t aofs, TCGv_i64 c,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
- static gen_helper_gvec_2i * const eq_fn[4] = {
- gen_helper_gvec_eqs8, gen_helper_gvec_eqs16,
- gen_helper_gvec_eqs32, gen_helper_gvec_eqs64
- };
- static gen_helper_gvec_2i * const lt_fn[4] = {
- gen_helper_gvec_lts8, gen_helper_gvec_lts16,
- gen_helper_gvec_lts32, gen_helper_gvec_lts64
- };
- static gen_helper_gvec_2i * const le_fn[4] = {
- gen_helper_gvec_les8, gen_helper_gvec_les16,
- gen_helper_gvec_les32, gen_helper_gvec_les64
- };
- static gen_helper_gvec_2i * const ltu_fn[4] = {
- gen_helper_gvec_ltus8, gen_helper_gvec_ltus16,
- gen_helper_gvec_ltus32, gen_helper_gvec_ltus64
- };
- static gen_helper_gvec_2i * const leu_fn[4] = {
- gen_helper_gvec_leus8, gen_helper_gvec_leus16,
- gen_helper_gvec_leus32, gen_helper_gvec_leus64
- };
- static gen_helper_gvec_2i * const * const fns[16] = {
- [TCG_COND_EQ] = eq_fn,
- [TCG_COND_LT] = lt_fn,
- [TCG_COND_LE] = le_fn,
- [TCG_COND_LTU] = ltu_fn,
- [TCG_COND_LEU] = leu_fn,
- };
- TCGType type;
- check_size_align(oprsz, maxsz, dofs | aofs);
- check_overlap_2(dofs, aofs, maxsz);
- if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
- do_dup(MO_8, dofs, oprsz, maxsz,
- NULL, NULL, -(cond == TCG_COND_ALWAYS));
- return;
- }
- /*
- * Implement inline with a vector type, if possible.
- * Prefer integer when 64-bit host and 64-bit comparison.
- */
- type = choose_vector_type(cmp_list, vece, oprsz,
- TCG_TARGET_REG_BITS == 64 && vece == MO_64);
- if (type != 0) {
- const TCGOpcode *hold_list = tcg_swap_vecop_list(cmp_list);
- TCGv_vec t_vec = tcg_temp_new_vec(type);
- uint32_t some;
- tcg_gen_dup_i64_vec(vece, t_vec, c);
- switch (type) {
- case TCG_TYPE_V256:
- some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_cmps_vec(vece, dofs, aofs, some, 32,
- TCG_TYPE_V256, cond, t_vec);
- aofs += some;
- dofs += some;
- oprsz -= some;
- maxsz -= some;
- /* fallthru */
- case TCG_TYPE_V128:
- some = QEMU_ALIGN_DOWN(oprsz, 16);
- expand_cmps_vec(vece, dofs, aofs, some, 16,
- TCG_TYPE_V128, cond, t_vec);
- break;
- case TCG_TYPE_V64:
- some = QEMU_ALIGN_DOWN(oprsz, 8);
- expand_cmps_vec(vece, dofs, aofs, some, 8,
- TCG_TYPE_V64, cond, t_vec);
- break;
- default:
- g_assert_not_reached();
- }
- tcg_temp_free_vec(t_vec);
- tcg_swap_vecop_list(hold_list);
- } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- uint32_t i;
- for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, tcg_env, aofs + i);
- tcg_gen_negsetcond_i64(cond, t0, t0, c);
- tcg_gen_st_i64(t0, tcg_env, dofs + i);
- }
- tcg_temp_free_i64(t0);
- } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
- TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- TCGv_i32 t1 = tcg_temp_ebb_new_i32();
- uint32_t i;
- tcg_gen_extrl_i64_i32(t1, c);
- for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, tcg_env, aofs + i);
- tcg_gen_negsetcond_i32(cond, t0, t0, t1);
- tcg_gen_st_i32(t0, tcg_env, dofs + i);
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- } else {
- gen_helper_gvec_2i * const *fn = fns[cond];
- bool inv = false;
- if (fn == NULL) {
- cond = tcg_invert_cond(cond);
- fn = fns[cond];
- assert(fn != NULL);
- inv = true;
- }
- tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, inv, fn[vece]);
- return;
- }
- if (oprsz < maxsz) {
- expand_clr(dofs + oprsz, maxsz - oprsz);
- }
- }
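- /* As above, with an immediate comparison value. */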
- void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
- uint32_t aofs, int64_t c,
- uint32_t oprsz, uint32_t maxsz)
- {
- TCGv_i64 tmp = tcg_constant_i64(c);
- tcg_gen_gvec_cmps(cond, vece, dofs, aofs, tmp, oprsz, maxsz);
- }
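- /*
- * Bit-select: D = (A & B) | (~A & C), i.e. take bits from B where the
- * mask A is set and from C where it is clear.
- */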
- static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
- {
- TCGv_i64 t = tcg_temp_ebb_new_i64();
- tcg_gen_and_i64(t, b, a);
- tcg_gen_andc_i64(d, c, a);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
- }
- void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t cofs,
- uint32_t oprsz, uint32_t maxsz)
- {
- static const GVecGen4 g = {
- .fni8 = tcg_gen_bitsel_i64,
- .fniv = tcg_gen_bitsel_vec,
- .fno = gen_helper_gvec_bitsel,
- };
- tcg_gen_gvec_4(dofs, aofs, bofs, cofs, oprsz, maxsz, &g);
- }