tcg-op-gvec.c
/*
 * Generic vector operation expansion
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-op-gvec-common.h"
#include "tcg/tcg-gvec-desc.h"
#include "tcg-has.h"

#define MAX_UNROLL  4

#ifdef CONFIG_DEBUG_TCG
static const TCGOpcode vecop_list_empty[1] = { 0 };
#else
#define vecop_list_empty NULL
#endif

/* Verify vector size and alignment rules.  OFS should be the OR of all
   of the operand offsets so that we can check them all at once.  */
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
{
    uint32_t max_align;

    switch (oprsz) {
    case 8:
    case 16:
    case 32:
        tcg_debug_assert(oprsz <= maxsz);
        break;
    default:
        tcg_debug_assert(oprsz == maxsz);
        break;
    }
    tcg_debug_assert(maxsz <= (8 << SIMD_MAXSZ_BITS));

    max_align = maxsz >= 16 ? 15 : 7;
    tcg_debug_assert((maxsz & max_align) == 0);
    tcg_debug_assert((ofs & max_align) == 0);
}

/* Verify vector overlap rules for two operands.  */
static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
{
    tcg_debug_assert(d == a || d + s <= a || a + s <= d);
}

/* Verify vector overlap rules for three operands.  */
static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(a, b, s);
}

/* Verify vector overlap rules for four operands.  */
static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
                            uint32_t c, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(d, c, s);
    check_overlap_2(a, b, s);
    check_overlap_2(a, c, s);
    check_overlap_2(b, c, s);
}

/* Create a descriptor from components.  */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
{
    uint32_t desc = 0;

    check_size_align(oprsz, maxsz, 0);

    /*
     * We want to check that 'data' will fit into SIMD_DATA_BITS.
     * However, some callers want to treat the data as a signed
     * value (which they can later get back with simd_data())
     * and some want to treat it as an unsigned value.
     * So here we assert only that the data will fit into the
     * field in at least one way.  This means that some invalid
     * values from the caller will not be detected, e.g. if the
     * caller wants to handle the value as a signed integer but
     * incorrectly passes us 1 << (SIMD_DATA_BITS - 1).
     */
    tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) ||
                     data == extract32(data, 0, SIMD_DATA_BITS));

    oprsz = (oprsz / 8) - 1;
    maxsz = (maxsz / 8) - 1;

    /*
     * We have just asserted in check_size_align that either
     * oprsz is {8,16,32} or matches maxsz.  Encode the final
     * case with '2', as that would otherwise map to 24.
     */
    if (oprsz == maxsz) {
        oprsz = 2;
    }

    desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
    desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
    desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);

    return desc;
}
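
/*
 * Worked example (illustrative comment added by the editor, not part of
 * the original source): simd_desc(oprsz = 32, maxsz = 64, data = 0)
 * encodes the operation-size field as (32 / 8) - 1 = 3 and the
 * maximum-size field as (64 / 8) - 1 = 7; since oprsz != maxsz the
 * special encoding '2' is not used, and the two size fields plus the
 * zero data field are packed into the descriptor with deposit32() above.
 */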

/* Generate a call to a gvec-style helper with two vector operands.  */
void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_2 *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);

    fn(a0, a1, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
}

/* Generate a call to a gvec-style helper with two vector operands
   and one scalar operand.  */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
                         uint32_t oprsz, uint32_t maxsz, int32_t data,
                         gen_helper_gvec_2i *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);

    fn(a0, a1, c, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
}

/* Generate a call to a gvec-style helper with three vector operands.  */
void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_3 *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();
    a2 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);
    tcg_gen_addi_ptr(a2, tcg_env, bofs);

    fn(a0, a1, a2, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
}
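
/*
 * Illustrative use of the wrappers above (added comment; the helper
 * name "gen_helper_foo" is hypothetical, not one defined here):
 *
 *     tcg_gen_gvec_3_ool(dofs, aofs, bofs, 16, 16, 0, gen_helper_foo);
 *
 * computes env-relative pointers to the destination and the two source
 * vectors and calls the out-of-line helper with a descriptor built by
 * simd_desc(16, 16, 0).
 */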

/* Generate a call to a gvec-style helper with four vector operands.  */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_4 *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();
    a2 = tcg_temp_ebb_new_ptr();
    a3 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);
    tcg_gen_addi_ptr(a2, tcg_env, bofs);
    tcg_gen_addi_ptr(a3, tcg_env, cofs);

    fn(a0, a1, a2, a3, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
}

/* Generate a call to a gvec-style helper with five vector operands.  */
void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t xofs, uint32_t oprsz,
                        uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();
    a2 = tcg_temp_ebb_new_ptr();
    a3 = tcg_temp_ebb_new_ptr();
    a4 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);
    tcg_gen_addi_ptr(a2, tcg_env, bofs);
    tcg_gen_addi_ptr(a3, tcg_env, cofs);
    tcg_gen_addi_ptr(a4, tcg_env, xofs);

    fn(a0, a1, a2, a3, a4, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
}

/* Generate a call to a gvec-style helper with two vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_2_ptr *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);

    fn(a0, a1, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
}

/* Generate a call to a gvec-style helper with three vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();
    a2 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);
    tcg_gen_addi_ptr(a2, tcg_env, bofs);

    fn(a0, a1, a2, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
}

/* Generate a call to a gvec-style helper with four vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
                        uint32_t maxsz, int32_t data,
                        gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();
    a2 = tcg_temp_ebb_new_ptr();
    a3 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);
    tcg_gen_addi_ptr(a2, tcg_env, bofs);
    tcg_gen_addi_ptr(a3, tcg_env, cofs);

    fn(a0, a1, a2, a3, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
}

/* Generate a call to a gvec-style helper with five vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_5_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_ebb_new_ptr();
    a1 = tcg_temp_ebb_new_ptr();
    a2 = tcg_temp_ebb_new_ptr();
    a3 = tcg_temp_ebb_new_ptr();
    a4 = tcg_temp_ebb_new_ptr();

    tcg_gen_addi_ptr(a0, tcg_env, dofs);
    tcg_gen_addi_ptr(a1, tcg_env, aofs);
    tcg_gen_addi_ptr(a2, tcg_env, bofs);
    tcg_gen_addi_ptr(a3, tcg_env, cofs);
    tcg_gen_addi_ptr(a4, tcg_env, eofs);

    fn(a0, a1, a2, a3, a4, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
}

/* Return true if we want to implement something of OPRSZ bytes
   in units of LNSZ.  This limits the expansion of inline code.  */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
    uint32_t q, r;

    if (oprsz < lnsz) {
        return false;
    }

    q = oprsz / lnsz;
    r = oprsz % lnsz;
    tcg_debug_assert((r & 7) == 0);

    if (lnsz < 16) {
        /* For sizes below 16, accept no remainder. */
        if (r != 0) {
            return false;
        }
    } else {
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         * In addition, expand_clr needs to handle a multiple of 8.
         * Thus we can handle the tail with one more operation per
         * diminishing power of 2.
         */
        q += ctpop32(r);
    }

    return q <= MAX_UNROLL;
}
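
/*
 * Worked example (illustrative comment): check_size_impl(80, 32)
 * computes q = 2 and r = 16; since lnsz >= 16 the tail is charged one
 * extra operation per set bit of r, so q becomes 3, which is within
 * MAX_UNROLL and the inline expansion is accepted.
 */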

static void expand_clr(uint32_t dofs, uint32_t maxsz);

/* Duplicate C as per VECE. */
uint64_t (dup_const)(unsigned vece, uint64_t c)
{
    switch (vece) {
    case MO_8:
        return 0x0101010101010101ull * (uint8_t)c;
    case MO_16:
        return 0x0001000100010001ull * (uint16_t)c;
    case MO_32:
        return 0x0000000100000001ull * (uint32_t)c;
    case MO_64:
        return c;
    default:
        g_assert_not_reached();
    }
}
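
/*
 * Example (illustrative comment): dup_const(MO_16, 0x1234) returns
 * 0x1234123412341234, i.e. the low 16 bits of the constant replicated
 * across all four 16-bit lanes of a 64-bit value.
 */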

/* Duplicate IN into OUT as per VECE. */
void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i32(out, in);
        tcg_gen_muli_i32(out, out, 0x01010101);
        break;
    case MO_16:
        tcg_gen_deposit_i32(out, in, in, 16, 16);
        break;
    case MO_32:
        tcg_gen_mov_i32(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0101010101010101ull);
        break;
    case MO_16:
        tcg_gen_ext16u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0001000100010001ull);
        break;
    case MO_32:
        tcg_gen_deposit_i64(out, in, in, 32, 32);
        break;
    case MO_64:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Select a supported vector type for implementing an operation on SIZE
 * bytes.  If OP is 0, assume that the real operation to be performed is
 * required by all backends.  Otherwise, make sure that OP can be performed
 * on elements of size VECE in the selected type.  Do not select V64 if
 * PREFER_I64 is true.  Return 0 if no vector type is selected.
 */
static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
                                  uint32_t size, bool prefer_i64)
{
    /*
     * Recall that ARM SVE allows vector sizes that are not a
     * power of 2, but always a multiple of 16.  The intent is
     * that e.g. size == 80 would be expanded with 2x32 + 1x16.
     * It is hard to imagine a case in which v256 is supported
     * but v128 is not, but check anyway.
     * In addition, expand_clr needs to handle a multiple of 8.
     */
    if (TCG_TARGET_HAS_v256 &&
        check_size_impl(size, 32) &&
        tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece) &&
        (!(size & 16) ||
         (TCG_TARGET_HAS_v128 &&
          tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) &&
        (!(size & 8) ||
         (TCG_TARGET_HAS_v64 &&
          tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
        return TCG_TYPE_V256;
    }
    if (TCG_TARGET_HAS_v128 &&
        check_size_impl(size, 16) &&
        tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece) &&
        (!(size & 8) ||
         (TCG_TARGET_HAS_v64 &&
          tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
        return TCG_TYPE_V128;
    }
    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
        && tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)) {
        return TCG_TYPE_V64;
    }
    return 0;
}
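
/*
 * Illustrative comment: for size == 80 on a host that supports 256-bit
 * and 128-bit vectors (and can emit the required ops for both),
 * choose_vector_type() returns TCG_TYPE_V256; the expansion then covers
 * the 16-byte tail with a V128 operation, which is why the (size & 16)
 * case above also requires V128 support.
 */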

static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, TCGv_vec t_vec)
{
    uint32_t i = 0;

    tcg_debug_assert(oprsz >= 8);

    /*
     * This may be expand_clr for the tail of an operation, e.g.
     * oprsz == 8 && maxsz == 64.  The first 8 bytes of this store
     * are misaligned wrt the maximum vector size, so do that first.
     */
    if (dofs & 8) {
        tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
        i += 8;
    }

    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        for (; i + 32 <= oprsz; i += 32) {
            tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256);
        }
        /* fallthru */
    case TCG_TYPE_V128:
        for (; i + 16 <= oprsz; i += 16) {
            tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128);
        }
        break;
    case TCG_TYPE_V64:
        for (; i < oprsz; i += 8) {
            tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
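
/*
 * Illustrative comment: with type == TCG_TYPE_V256 and an aligned
 * oprsz of 80 bytes, the switch above emits two 32-byte stores and
 * then falls through to emit one 16-byte store for the tail; any gap
 * between oprsz and maxsz would then be cleared with expand_clr().
 */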

/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
 * Only one of IN_32 or IN_64 may be set;
 * IN_C is used if IN_32 and IN_64 are unset.
 */
static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
                   uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
                   uint64_t in_c)
{
    TCGType type;
    TCGv_i64 t_64;
    TCGv_i32 t_32, t_desc;
    TCGv_ptr t_ptr;
    uint32_t i;

    assert(vece <= (in_32 ? MO_32 : MO_64));
    assert(in_32 == NULL || in_64 == NULL);

    /* If we're storing 0, expand oprsz to maxsz. */
    if (in_32 == NULL && in_64 == NULL) {
        in_c = dup_const(vece, in_c);
        if (in_c == 0) {
            oprsz = maxsz;
            vece = MO_8;
        } else if (in_c == dup_const(MO_8, in_c)) {
            vece = MO_8;
        }
    }

    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and no variable dup.
     */
    type = choose_vector_type(NULL, vece, oprsz,
                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                               && (in_64 == NULL || vece == MO_64)));
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);

        if (in_32) {
            tcg_gen_dup_i32_vec(vece, t_vec, in_32);
        } else if (in_64) {
            tcg_gen_dup_i64_vec(vece, t_vec, in_64);
        } else {
            tcg_gen_dupi_vec(vece, t_vec, in_c);
        }
        do_dup_store(type, dofs, oprsz, maxsz, t_vec);
        return;
    }

    /* Otherwise, inline with an integer type, unless "large".  */
    if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) {
        t_64 = NULL;
        t_32 = NULL;

        if (in_32) {
            /* We are given a 32-bit variable input.  For a 64-bit host,
               use a 64-bit operation unless the 32-bit operation would
               be simple enough.  */
            if (TCG_TARGET_REG_BITS == 64
                && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
                t_64 = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(t_64, in_32);
                tcg_gen_dup_i64(vece, t_64, t_64);
            } else {
                t_32 = tcg_temp_ebb_new_i32();
                tcg_gen_dup_i32(vece, t_32, in_32);
            }
        } else if (in_64) {
            /* We are given a 64-bit variable input.  */
            t_64 = tcg_temp_ebb_new_i64();
            tcg_gen_dup_i64(vece, t_64, in_64);
        } else {
            /* We are given a constant input.  */
            /* For 64-bit hosts, use 64-bit constants for "simple" constants
               or when we'd need too many 32-bit stores, or when a 64-bit
               constant is really required.  */
            if (vece == MO_64
                || (TCG_TARGET_REG_BITS == 64
                    && (in_c == 0 || in_c == -1
                        || !check_size_impl(oprsz, 4)))) {
                t_64 = tcg_constant_i64(in_c);
            } else {
                t_32 = tcg_constant_i32(in_c);
            }
        }

        /* Implement inline if we picked an implementation size above.  */
        if (t_32) {
            for (i = 0; i < oprsz; i += 4) {
                tcg_gen_st_i32(t_32, tcg_env, dofs + i);
            }
            tcg_temp_free_i32(t_32);
            goto done;
        }
        if (t_64) {
            for (i = 0; i < oprsz; i += 8) {
                tcg_gen_st_i64(t_64, tcg_env, dofs + i);
            }
            tcg_temp_free_i64(t_64);
            goto done;
        }
    }

    /* Otherwise implement out of line.  */
    t_ptr = tcg_temp_ebb_new_ptr();
    tcg_gen_addi_ptr(t_ptr, tcg_env, dofs);

    /*
     * This may be expand_clr for the tail of an operation, e.g.
     * oprsz == 8 && maxsz == 64.  The size of the clear is misaligned
     * wrt simd_desc and will assert.  Simply pass all replicated byte
     * stores through to memset.
     */
    if (oprsz == maxsz && vece == MO_8) {
        TCGv_ptr t_size = tcg_constant_ptr(oprsz);
        TCGv_i32 t_val;

        if (in_32) {
            t_val = in_32;
        } else if (in_64) {
            t_val = tcg_temp_ebb_new_i32();
            tcg_gen_extrl_i64_i32(t_val, in_64);
        } else {
            t_val = tcg_constant_i32(in_c);
        }
        gen_helper_memset(t_ptr, t_ptr, t_val, t_size);

        if (in_64) {
            tcg_temp_free_i32(t_val);
        }
        tcg_temp_free_ptr(t_ptr);
        return;
    }

    t_desc = tcg_constant_i32(simd_desc(oprsz, maxsz, 0));

    if (vece == MO_64) {
        if (in_64) {
            gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
        } else {
            t_64 = tcg_constant_i64(in_c);
            gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
        }
    } else {
        typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);
        static dup_fn * const fns[3] = {
            gen_helper_gvec_dup8,
            gen_helper_gvec_dup16,
            gen_helper_gvec_dup32
        };

        if (in_32) {
            fns[vece](t_ptr, t_desc, in_32);
        } else if (in_64) {
            t_32 = tcg_temp_ebb_new_i32();
            tcg_gen_extrl_i64_i32(t_32, in_64);
            fns[vece](t_ptr, t_desc, t_32);
            tcg_temp_free_i32(t_32);
        } else {
            if (vece == MO_8) {
                in_c &= 0xff;
            } else if (vece == MO_16) {
                in_c &= 0xffff;
            }
            t_32 = tcg_constant_i32(in_c);
            fns[vece](t_ptr, t_desc, t_32);
        }
    }

    tcg_temp_free_ptr(t_ptr);
    return;

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Likewise, but with zero.  */
static void expand_clr(uint32_t dofs, uint32_t maxsz)
{
    do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
}
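
/*
 * Note (added comment): because expand_clr() passes in_c == 0, do_dup()
 * widens oprsz to maxsz and treats the store as MO_8; when no suitable
 * vector or integer expansion applies, that satisfies the
 * "oprsz == maxsz && vece == MO_8" condition above and the clear is
 * handled by a single call to gen_helper_memset().
 */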

/* Expand OPRSZ bytes worth of two-operand operations using i32 elements.  */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, tcg_env, dofs + i);
        }
        fni(t1, t0);
        tcg_gen_st_i32(t1, tcg_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, tcg_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i32(t1, tcg_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i32 c, bool scalar_first,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i32(t1, tcg_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_3_i32(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
        tcg_gen_ld_i32(t1, tcg_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, tcg_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i32(t2, tcg_env, dofs + i);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int32_t c,
                          bool load_dest, bool write_aofs,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
        tcg_gen_ld_i32(t1, tcg_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, tcg_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i32(t2, tcg_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i32(t0, tcg_env, aofs + i);
        }
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i32 elements.  */
static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t1, tcg_env, aofs + i);
        tcg_gen_ld_i32(t2, tcg_env, bofs + i);
        tcg_gen_ld_i32(t3, tcg_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i32(t0, tcg_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i32(t1, tcg_env, aofs + i);
        }
    }
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t cofs, uint32_t oprsz, int32_t c,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32,
                                      int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t1, tcg_env, aofs + i);
        tcg_gen_ld_i32(t2, tcg_env, bofs + i);
        tcg_gen_ld_i32(t3, tcg_env, cofs + i);
        fni(t0, t1, t2, t3, c);
        tcg_gen_st_i32(t0, tcg_env, dofs + i);
    }
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

/* Expand OPRSZ bytes worth of two-operand operations using i64 elements.  */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, tcg_env, dofs + i);
        }
        fni(t1, t0);
        tcg_gen_st_i64(t1, tcg_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, tcg_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i64(t1, tcg_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i64 c, bool scalar_first,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i64(t1, tcg_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i64 elements.  */
static void expand_3_i64(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
        tcg_gen_ld_i64(t1, tcg_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, tcg_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i64(t2, tcg_env, dofs + i);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int64_t c,
                          bool load_dest, bool write_aofs,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
        tcg_gen_ld_i64(t1, tcg_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, tcg_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i64(t2, tcg_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(t0, tcg_env, aofs + i);
        }
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i64 elements.  */
static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t1, tcg_env, aofs + i);
        tcg_gen_ld_i64(t2, tcg_env, bofs + i);
        tcg_gen_ld_i64(t3, tcg_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i64(t0, tcg_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(t1, tcg_env, aofs + i);
        }
    }
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t cofs, uint32_t oprsz, int64_t c,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64,
                                      int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t1, tcg_env, aofs + i);
        tcg_gen_ld_i64(t2, tcg_env, bofs + i);
        tcg_gen_ld_i64(t3, tcg_env, cofs + i);
        fni(t0, t1, t2, t3, c);
        tcg_gen_st_i64(t0, tcg_env, dofs + i);
    }
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

/* Expand OPRSZ bytes worth of two-operand operations using host vectors.  */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t oprsz, uint32_t tysz, TCGType type,
                         bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, tcg_env, dofs + i);
        }
        fni(vece, t1, t0);
        tcg_gen_st_vec(t1, tcg_env, dofs + i);
    }
}

/* Expand OPRSZ bytes worth of two-vector operands and an immediate operand
   using host vectors.  */
static void expand_2i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, int64_t))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, tcg_env, dofs + i);
        }
        fni(vece, t1, t0, c);
        tcg_gen_st_vec(t1, tcg_env, dofs + i);
    }
}

static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          TCGv_vec c, bool scalar_first,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        if (scalar_first) {
            fni(vece, t1, c, t0);
        } else {
            fni(vece, t1, t0, c);
        }
        tcg_gen_st_vec(t1, tcg_env, dofs + i);
    }
}

/* Expand OPRSZ bytes worth of three-operand operations using host vectors.  */
static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);
        TCGv_vec t2 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        tcg_gen_ld_vec(t1, tcg_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, tcg_env, dofs + i);
        }
        fni(vece, t2, t0, t1);
        tcg_gen_st_vec(t2, tcg_env, dofs + i);
    }
}

/*
 * Expand OPRSZ bytes worth of three-vector operands and an immediate operand
 * using host vectors.
 */
static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                          TCGType type, int64_t c,
                          bool load_dest, bool write_aofs,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
                                      int64_t))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);
        TCGv_vec t2 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        tcg_gen_ld_vec(t1, tcg_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, tcg_env, dofs + i);
        }
        fni(vece, t2, t0, t1, c);
        tcg_gen_st_vec(t2, tcg_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_vec(t0, tcg_env, aofs + i);
        }
    }
}

/* Expand OPRSZ bytes worth of four-operand operations using host vectors.  */
static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool write_aofs,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec,
                                     TCGv_vec, TCGv_vec))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);
        TCGv_vec t2 = tcg_temp_new_vec(type);
        TCGv_vec t3 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t1, tcg_env, aofs + i);
        tcg_gen_ld_vec(t2, tcg_env, bofs + i);
        tcg_gen_ld_vec(t3, tcg_env, cofs + i);
        fni(vece, t0, t1, t2, t3);
        tcg_gen_st_vec(t0, tcg_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_vec(t1, tcg_env, aofs + i);
        }
    }
}

/*
 * Expand OPRSZ bytes worth of four-vector operands and an immediate operand
 * using host vectors.
 */
static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                          uint32_t tysz, TCGType type, int64_t c,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec,
                                      TCGv_vec, TCGv_vec, int64_t))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);
        TCGv_vec t2 = tcg_temp_new_vec(type);
        TCGv_vec t3 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t1, tcg_env, aofs + i);
        tcg_gen_ld_vec(t2, tcg_env, bofs + i);
        tcg_gen_ld_vec(t3, tcg_env, cofs + i);
        fni(vece, t0, t1, t2, t3, c);
        tcg_gen_st_vec(t0, tcg_env, dofs + i);
    }
}

/* Expand a vector two-operand operation.  */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
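
/*
 * Illustrative comment: if choose_vector_type() selected TCG_TYPE_V256
 * for oprsz == 80, the expansion above covers the first
 * QEMU_ALIGN_DOWN(80, 32) == 64 bytes with 256-bit operations and then
 * falls through to the TCG_TYPE_V128 case for the remaining 16 bytes.
 */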
  1118. /* Expand a vector operation with two vectors and an immediate. */
  1119. void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
  1120. uint32_t maxsz, int64_t c, const GVecGen2i *g)
  1121. {
  1122. const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
  1123. const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
  1124. TCGType type;
  1125. uint32_t some;
  1126. check_size_align(oprsz, maxsz, dofs | aofs);
  1127. check_overlap_2(dofs, aofs, maxsz);
  1128. type = 0;
  1129. if (g->fniv) {
  1130. type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
  1131. }
  1132. switch (type) {
  1133. case TCG_TYPE_V256:
  1134. /* Recall that ARM SVE allows vector sizes that are not a
  1135. * power of 2, but always a multiple of 16. The intent is
  1136. * that e.g. size == 80 would be expanded with 2x32 + 1x16.
  1137. */
  1138. some = QEMU_ALIGN_DOWN(oprsz, 32);
  1139. expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
  1140. c, g->load_dest, g->fniv);
  1141. if (some == oprsz) {
  1142. break;
  1143. }
  1144. dofs += some;
  1145. aofs += some;
  1146. oprsz -= some;
  1147. maxsz -= some;
  1148. /* fallthru */
  1149. case TCG_TYPE_V128:
  1150. expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
  1151. c, g->load_dest, g->fniv);
  1152. break;
  1153. case TCG_TYPE_V64:
  1154. expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
  1155. c, g->load_dest, g->fniv);
  1156. break;
  1157. case 0:
  1158. if (g->fni8 && check_size_impl(oprsz, 8)) {
  1159. expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
  1160. } else if (g->fni4 && check_size_impl(oprsz, 4)) {
  1161. expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
  1162. } else {
  1163. if (g->fno) {
  1164. tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
  1165. } else {
  1166. TCGv_i64 tcg_c = tcg_constant_i64(c);
  1167. tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
  1168. maxsz, c, g->fnoi);
  1169. }
  1170. oprsz = maxsz;
  1171. }
  1172. break;
  1173. default:
  1174. g_assert_not_reached();
  1175. }
  1176. tcg_swap_vecop_list(hold_list);
  1177. if (oprsz < maxsz) {
  1178. expand_clr(dofs + oprsz, maxsz - oprsz);
  1179. }
  1180. }
  1181. /* Expand a vector operation with two vectors and a scalar. */
  1182. void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
  1183. uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g)
  1184. {
  1185. TCGType type;
  1186. check_size_align(oprsz, maxsz, dofs | aofs);
  1187. check_overlap_2(dofs, aofs, maxsz);
  1188. type = 0;
  1189. if (g->fniv) {
  1190. type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
  1191. }
  1192. if (type != 0) {
  1193. const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
  1194. const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
  1195. TCGv_vec t_vec = tcg_temp_new_vec(type);
  1196. uint32_t some;
  1197. tcg_gen_dup_i64_vec(g->vece, t_vec, c);
  1198. switch (type) {
  1199. case TCG_TYPE_V256:
  1200. /* Recall that ARM SVE allows vector sizes that are not a
  1201. * power of 2, but always a multiple of 16. The intent is
  1202. * that e.g. size == 80 would be expanded with 2x32 + 1x16.
  1203. */
  1204. some = QEMU_ALIGN_DOWN(oprsz, 32);
  1205. expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
  1206. t_vec, g->scalar_first, g->fniv);
  1207. if (some == oprsz) {
  1208. break;
  1209. }
  1210. dofs += some;
  1211. aofs += some;
  1212. oprsz -= some;
  1213. maxsz -= some;
  1214. /* fallthru */
  1215. case TCG_TYPE_V128:
  1216. expand_2s_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
  1217. t_vec, g->scalar_first, g->fniv);
  1218. break;
  1219. case TCG_TYPE_V64:
  1220. expand_2s_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
  1221. t_vec, g->scalar_first, g->fniv);
  1222. break;
  1223. default:
  1224. g_assert_not_reached();
  1225. }
  1226. tcg_temp_free_vec(t_vec);
  1227. tcg_swap_vecop_list(hold_list);
  1228. } else if (g->fni8 && check_size_impl(oprsz, 8)) {
  1229. TCGv_i64 t64 = tcg_temp_new_i64();
  1230. tcg_gen_dup_i64(g->vece, t64, c);
  1231. expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
  1232. tcg_temp_free_i64(t64);
  1233. } else if (g->fni4 && check_size_impl(oprsz, 4)) {
  1234. TCGv_i32 t32 = tcg_temp_new_i32();
  1235. tcg_gen_extrl_i64_i32(t32, c);
  1236. tcg_gen_dup_i32(g->vece, t32, t32);
  1237. expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
  1238. tcg_temp_free_i32(t32);
  1239. } else {
  1240. tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, 0, g->fno);
  1241. return;
  1242. }
  1243. if (oprsz < maxsz) {
  1244. expand_clr(dofs + oprsz, maxsz - oprsz);
  1245. }
  1246. }
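/*
 * Added note: for the 2s ("two vectors and a scalar") form the scalar c is
 * broadcast once -- into a vector temp via tcg_gen_dup_i64_vec(), or into an
 * i64/i32 temp via tcg_gen_dup_i64()/tcg_gen_dup_i32() -- and then reused for
 * every element group.  g->scalar_first selects whether the scalar is passed
 * as the first or the second source operand of g->fniv.
 */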
  1247. /* Expand a vector three-operand operation. */
  1248. void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
  1249. uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
  1250. {
  1251. const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
  1252. const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
  1253. TCGType type;
  1254. uint32_t some;
  1255. check_size_align(oprsz, maxsz, dofs | aofs | bofs);
  1256. check_overlap_3(dofs, aofs, bofs, maxsz);
  1257. type = 0;
  1258. if (g->fniv) {
  1259. type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
  1260. }
  1261. switch (type) {
  1262. case TCG_TYPE_V256:
  1263. /* Recall that ARM SVE allows vector sizes that are not a
  1264. * power of 2, but always a multiple of 16. The intent is
  1265. * that e.g. size == 80 would be expanded with 2x32 + 1x16.
  1266. */
  1267. some = QEMU_ALIGN_DOWN(oprsz, 32);
  1268. expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
  1269. g->load_dest, g->fniv);
  1270. if (some == oprsz) {
  1271. break;
  1272. }
  1273. dofs += some;
  1274. aofs += some;
  1275. bofs += some;
  1276. oprsz -= some;
  1277. maxsz -= some;
  1278. /* fallthru */
  1279. case TCG_TYPE_V128:
  1280. expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
  1281. g->load_dest, g->fniv);
  1282. break;
  1283. case TCG_TYPE_V64:
  1284. expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
  1285. g->load_dest, g->fniv);
  1286. break;
  1287. case 0:
  1288. if (g->fni8 && check_size_impl(oprsz, 8)) {
  1289. expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
  1290. } else if (g->fni4 && check_size_impl(oprsz, 4)) {
  1291. expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
  1292. } else {
  1293. assert(g->fno != NULL);
  1294. tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
  1295. maxsz, g->data, g->fno);
  1296. oprsz = maxsz;
  1297. }
  1298. break;
  1299. default:
  1300. g_assert_not_reached();
  1301. }
  1302. tcg_swap_vecop_list(hold_list);
  1303. if (oprsz < maxsz) {
  1304. expand_clr(dofs + oprsz, maxsz - oprsz);
  1305. }
  1306. }
  1307. /* Expand a vector operation with three vectors and an immediate. */
  1308. void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
  1309. uint32_t oprsz, uint32_t maxsz, int64_t c,
  1310. const GVecGen3i *g)
  1311. {
  1312. const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
  1313. const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
  1314. TCGType type;
  1315. uint32_t some;
  1316. check_size_align(oprsz, maxsz, dofs | aofs | bofs);
  1317. check_overlap_3(dofs, aofs, bofs, maxsz);
  1318. type = 0;
  1319. if (g->fniv) {
  1320. type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
  1321. }
  1322. switch (type) {
  1323. case TCG_TYPE_V256:
  1324. /*
  1325. * Recall that ARM SVE allows vector sizes that are not a
  1326. * power of 2, but always a multiple of 16. The intent is
  1327. * that e.g. size == 80 would be expanded with 2x32 + 1x16.
  1328. */
  1329. some = QEMU_ALIGN_DOWN(oprsz, 32);
  1330. expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
  1331. c, g->load_dest, g->write_aofs, g->fniv);
  1332. if (some == oprsz) {
  1333. break;
  1334. }
  1335. dofs += some;
  1336. aofs += some;
  1337. bofs += some;
  1338. oprsz -= some;
  1339. maxsz -= some;
  1340. /* fallthru */
  1341. case TCG_TYPE_V128:
  1342. expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
  1343. c, g->load_dest, g->write_aofs, g->fniv);
  1344. break;
  1345. case TCG_TYPE_V64:
  1346. expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
  1347. c, g->load_dest, g->write_aofs, g->fniv);
  1348. break;
  1349. case 0:
  1350. if (g->fni8 && check_size_impl(oprsz, 8)) {
  1351. expand_3i_i64(dofs, aofs, bofs, oprsz, c,
  1352. g->load_dest, g->write_aofs, g->fni8);
  1353. } else if (g->fni4 && check_size_impl(oprsz, 4)) {
  1354. expand_3i_i32(dofs, aofs, bofs, oprsz, c,
  1355. g->load_dest, g->write_aofs, g->fni4);
  1356. } else {
  1357. assert(g->fno != NULL);
  1358. tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
  1359. oprsz = maxsz;
  1360. }
  1361. break;
  1362. default:
  1363. g_assert_not_reached();
  1364. }
  1365. tcg_swap_vecop_list(hold_list);
  1366. if (oprsz < maxsz) {
  1367. expand_clr(dofs + oprsz, maxsz - oprsz);
  1368. }
  1369. }
  1370. /* Expand a vector four-operand operation. */
  1371. void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
  1372. uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
  1373. {
  1374. const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
  1375. const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
  1376. TCGType type;
  1377. uint32_t some;
  1378. check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
  1379. check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
  1380. type = 0;
  1381. if (g->fniv) {
  1382. type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
  1383. }
  1384. switch (type) {
  1385. case TCG_TYPE_V256:
  1386. /* Recall that ARM SVE allows vector sizes that are not a
  1387. * power of 2, but always a multiple of 16. The intent is
  1388. * that e.g. size == 80 would be expanded with 2x32 + 1x16.
  1389. */
  1390. some = QEMU_ALIGN_DOWN(oprsz, 32);
  1391. expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
  1392. 32, TCG_TYPE_V256, g->write_aofs, g->fniv);
  1393. if (some == oprsz) {
  1394. break;
  1395. }
  1396. dofs += some;
  1397. aofs += some;
  1398. bofs += some;
  1399. cofs += some;
  1400. oprsz -= some;
  1401. maxsz -= some;
  1402. /* fallthru */
  1403. case TCG_TYPE_V128:
  1404. expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
  1405. 16, TCG_TYPE_V128, g->write_aofs, g->fniv);
  1406. break;
  1407. case TCG_TYPE_V64:
  1408. expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
  1409. 8, TCG_TYPE_V64, g->write_aofs, g->fniv);
  1410. break;
  1411. case 0:
  1412. if (g->fni8 && check_size_impl(oprsz, 8)) {
  1413. expand_4_i64(dofs, aofs, bofs, cofs, oprsz,
  1414. g->write_aofs, g->fni8);
  1415. } else if (g->fni4 && check_size_impl(oprsz, 4)) {
  1416. expand_4_i32(dofs, aofs, bofs, cofs, oprsz,
  1417. g->write_aofs, g->fni4);
  1418. } else {
  1419. assert(g->fno != NULL);
  1420. tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
  1421. oprsz, maxsz, g->data, g->fno);
  1422. oprsz = maxsz;
  1423. }
  1424. break;
  1425. default:
  1426. g_assert_not_reached();
  1427. }
  1428. tcg_swap_vecop_list(hold_list);
  1429. if (oprsz < maxsz) {
  1430. expand_clr(dofs + oprsz, maxsz - oprsz);
  1431. }
  1432. }
1433. /* Expand a vector four-operand operation with an immediate. */
  1434. void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
  1435. uint32_t oprsz, uint32_t maxsz, int64_t c,
  1436. const GVecGen4i *g)
  1437. {
  1438. const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
  1439. const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
  1440. TCGType type;
  1441. uint32_t some;
  1442. check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
  1443. check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
  1444. type = 0;
  1445. if (g->fniv) {
  1446. type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
  1447. }
  1448. switch (type) {
  1449. case TCG_TYPE_V256:
  1450. /*
  1451. * Recall that ARM SVE allows vector sizes that are not a
  1452. * power of 2, but always a multiple of 16. The intent is
  1453. * that e.g. size == 80 would be expanded with 2x32 + 1x16.
  1454. */
  1455. some = QEMU_ALIGN_DOWN(oprsz, 32);
  1456. expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, some,
  1457. 32, TCG_TYPE_V256, c, g->fniv);
  1458. if (some == oprsz) {
  1459. break;
  1460. }
  1461. dofs += some;
  1462. aofs += some;
  1463. bofs += some;
  1464. cofs += some;
  1465. oprsz -= some;
  1466. maxsz -= some;
  1467. /* fallthru */
  1468. case TCG_TYPE_V128:
  1469. expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
  1470. 16, TCG_TYPE_V128, c, g->fniv);
  1471. break;
  1472. case TCG_TYPE_V64:
  1473. expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
  1474. 8, TCG_TYPE_V64, c, g->fniv);
  1475. break;
  1476. case 0:
  1477. if (g->fni8 && check_size_impl(oprsz, 8)) {
  1478. expand_4i_i64(dofs, aofs, bofs, cofs, oprsz, c, g->fni8);
  1479. } else if (g->fni4 && check_size_impl(oprsz, 4)) {
  1480. expand_4i_i32(dofs, aofs, bofs, cofs, oprsz, c, g->fni4);
  1481. } else {
  1482. assert(g->fno != NULL);
  1483. tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
  1484. oprsz, maxsz, c, g->fno);
  1485. oprsz = maxsz;
  1486. }
  1487. break;
  1488. default:
  1489. g_assert_not_reached();
  1490. }
  1491. tcg_swap_vecop_list(hold_list);
  1492. if (oprsz < maxsz) {
  1493. expand_clr(dofs + oprsz, maxsz - oprsz);
  1494. }
  1495. }
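/*
 * Added note on the fallback pattern shared by the expanders above: when no
 * inline expansion applies, the out-of-line helper (g->fno) receives both
 * oprsz and maxsz in its simd descriptor and the generated helpers clear the
 * tail themselves, so oprsz is set to maxsz here only to skip the final
 * expand_clr().
 */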
  1496. /*
  1497. * Expand specific vector operations.
  1498. */
  1499. static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
  1500. {
  1501. tcg_gen_mov_vec(a, b);
  1502. }
  1503. void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
  1504. uint32_t oprsz, uint32_t maxsz)
  1505. {
  1506. static const GVecGen2 g = {
  1507. .fni8 = tcg_gen_mov_i64,
  1508. .fniv = vec_mov2,
  1509. .fno = gen_helper_gvec_mov,
  1510. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1511. };
  1512. if (dofs != aofs) {
  1513. tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
  1514. } else {
  1515. check_size_align(oprsz, maxsz, dofs);
  1516. if (oprsz < maxsz) {
  1517. expand_clr(dofs + oprsz, maxsz - oprsz);
  1518. }
  1519. }
  1520. }
  1521. void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
  1522. uint32_t maxsz, TCGv_i32 in)
  1523. {
  1524. check_size_align(oprsz, maxsz, dofs);
  1525. tcg_debug_assert(vece <= MO_32);
  1526. do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
  1527. }
  1528. void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
  1529. uint32_t maxsz, TCGv_i64 in)
  1530. {
  1531. check_size_align(oprsz, maxsz, dofs);
  1532. tcg_debug_assert(vece <= MO_64);
  1533. do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
  1534. }
  1535. void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
  1536. uint32_t oprsz, uint32_t maxsz)
  1537. {
  1538. check_size_align(oprsz, maxsz, dofs);
  1539. if (vece <= MO_64) {
  1540. TCGType type = choose_vector_type(NULL, vece, oprsz, 0);
  1541. if (type != 0) {
  1542. TCGv_vec t_vec = tcg_temp_new_vec(type);
  1543. tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs);
  1544. do_dup_store(type, dofs, oprsz, maxsz, t_vec);
  1545. } else if (vece <= MO_32) {
  1546. TCGv_i32 in = tcg_temp_ebb_new_i32();
  1547. switch (vece) {
  1548. case MO_8:
  1549. tcg_gen_ld8u_i32(in, tcg_env, aofs);
  1550. break;
  1551. case MO_16:
  1552. tcg_gen_ld16u_i32(in, tcg_env, aofs);
  1553. break;
  1554. default:
  1555. tcg_gen_ld_i32(in, tcg_env, aofs);
  1556. break;
  1557. }
  1558. do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
  1559. tcg_temp_free_i32(in);
  1560. } else {
  1561. TCGv_i64 in = tcg_temp_ebb_new_i64();
  1562. tcg_gen_ld_i64(in, tcg_env, aofs);
  1563. do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
  1564. tcg_temp_free_i64(in);
  1565. }
  1566. } else if (vece == 4) {
  1567. /* 128-bit duplicate. */
  1568. int i;
  1569. tcg_debug_assert(oprsz >= 16);
  1570. if (TCG_TARGET_HAS_v128) {
  1571. TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);
  1572. tcg_gen_ld_vec(in, tcg_env, aofs);
  1573. for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
  1574. tcg_gen_st_vec(in, tcg_env, dofs + i);
  1575. }
  1576. } else {
  1577. TCGv_i64 in0 = tcg_temp_ebb_new_i64();
  1578. TCGv_i64 in1 = tcg_temp_ebb_new_i64();
  1579. tcg_gen_ld_i64(in0, tcg_env, aofs);
  1580. tcg_gen_ld_i64(in1, tcg_env, aofs + 8);
  1581. for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
  1582. tcg_gen_st_i64(in0, tcg_env, dofs + i);
  1583. tcg_gen_st_i64(in1, tcg_env, dofs + i + 8);
  1584. }
  1585. tcg_temp_free_i64(in0);
  1586. tcg_temp_free_i64(in1);
  1587. }
  1588. if (oprsz < maxsz) {
  1589. expand_clr(dofs + oprsz, maxsz - oprsz);
  1590. }
  1591. } else if (vece == 5) {
  1592. /* 256-bit duplicate. */
  1593. int i;
  1594. tcg_debug_assert(oprsz >= 32);
  1595. tcg_debug_assert(oprsz % 32 == 0);
  1596. if (TCG_TARGET_HAS_v256) {
  1597. TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);
  1598. tcg_gen_ld_vec(in, tcg_env, aofs);
  1599. for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
  1600. tcg_gen_st_vec(in, tcg_env, dofs + i);
  1601. }
  1602. } else if (TCG_TARGET_HAS_v128) {
  1603. TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
  1604. TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);
  1605. tcg_gen_ld_vec(in0, tcg_env, aofs);
  1606. tcg_gen_ld_vec(in1, tcg_env, aofs + 16);
  1607. for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
  1608. tcg_gen_st_vec(in0, tcg_env, dofs + i);
  1609. tcg_gen_st_vec(in1, tcg_env, dofs + i + 16);
  1610. }
  1611. } else {
  1612. TCGv_i64 in[4];
  1613. int j;
  1614. for (j = 0; j < 4; ++j) {
  1615. in[j] = tcg_temp_ebb_new_i64();
  1616. tcg_gen_ld_i64(in[j], tcg_env, aofs + j * 8);
  1617. }
  1618. for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
  1619. for (j = 0; j < 4; ++j) {
  1620. tcg_gen_st_i64(in[j], tcg_env, dofs + i + j * 8);
  1621. }
  1622. }
  1623. for (j = 0; j < 4; ++j) {
  1624. tcg_temp_free_i64(in[j]);
  1625. }
  1626. }
  1627. if (oprsz < maxsz) {
  1628. expand_clr(dofs + oprsz, maxsz - oprsz);
  1629. }
  1630. } else {
  1631. g_assert_not_reached();
  1632. }
  1633. }
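/*
 * Added note: for tcg_gen_gvec_dup_mem, vece values 4 and 5 request 128-bit
 * and 256-bit element duplication respectively; when the host lacks a
 * matching vector type, these fall back to pairs or quads of 64-bit
 * loads and stores.
 */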
  1634. void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
  1635. uint32_t maxsz, uint64_t x)
  1636. {
  1637. check_size_align(oprsz, maxsz, dofs);
  1638. do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
  1639. }
  1640. void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
  1641. uint32_t oprsz, uint32_t maxsz)
  1642. {
  1643. static const GVecGen2 g = {
  1644. .fni8 = tcg_gen_not_i64,
  1645. .fniv = tcg_gen_not_vec,
  1646. .fno = gen_helper_gvec_not,
  1647. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1648. };
  1649. tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
  1650. }
  1651. /* Perform a vector addition using normal addition and a mask. The mask
  1652. should be the sign bit of each lane. This 6-operation form is more
  1653. efficient than separate additions when there are 4 or more lanes in
  1654. the 64-bit operation. */
  1655. static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
  1656. {
  1657. TCGv_i64 t1 = tcg_temp_ebb_new_i64();
  1658. TCGv_i64 t2 = tcg_temp_ebb_new_i64();
  1659. TCGv_i64 t3 = tcg_temp_ebb_new_i64();
  1660. tcg_gen_andc_i64(t1, a, m);
  1661. tcg_gen_andc_i64(t2, b, m);
  1662. tcg_gen_xor_i64(t3, a, b);
  1663. tcg_gen_add_i64(d, t1, t2);
  1664. tcg_gen_and_i64(t3, t3, m);
  1665. tcg_gen_xor_i64(d, d, t3);
  1666. tcg_temp_free_i64(t1);
  1667. tcg_temp_free_i64(t2);
  1668. tcg_temp_free_i64(t3);
  1669. }
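/*
 * Added worked identity for gen_addv_mask (with m = per-lane sign-bit mask):
 *
 *     a + b == ((a & ~m) + (b & ~m)) ^ ((a ^ b) & m)      per lane
 *
 * Clearing the sign bits first guarantees the per-lane sums cannot carry
 * into the neighboring lane; the xor then restores each lane's own sign
 * bit (the carry into the sign position is already present in the sum).
 */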
  1670. void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  1671. {
  1672. TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
  1673. gen_addv_mask(d, a, b, m);
  1674. }
  1675. void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  1676. {
  1677. TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
  1678. TCGv_i32 t1 = tcg_temp_ebb_new_i32();
  1679. TCGv_i32 t2 = tcg_temp_ebb_new_i32();
  1680. TCGv_i32 t3 = tcg_temp_ebb_new_i32();
  1681. tcg_gen_andc_i32(t1, a, m);
  1682. tcg_gen_andc_i32(t2, b, m);
  1683. tcg_gen_xor_i32(t3, a, b);
  1684. tcg_gen_add_i32(d, t1, t2);
  1685. tcg_gen_and_i32(t3, t3, m);
  1686. tcg_gen_xor_i32(d, d, t3);
  1687. tcg_temp_free_i32(t1);
  1688. tcg_temp_free_i32(t2);
  1689. tcg_temp_free_i32(t3);
  1690. }
  1691. void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  1692. {
  1693. TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
  1694. gen_addv_mask(d, a, b, m);
  1695. }
  1696. void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  1697. {
  1698. TCGv_i32 t1 = tcg_temp_ebb_new_i32();
  1699. TCGv_i32 t2 = tcg_temp_ebb_new_i32();
  1700. tcg_gen_andi_i32(t1, a, ~0xffff);
  1701. tcg_gen_add_i32(t2, a, b);
  1702. tcg_gen_add_i32(t1, t1, b);
  1703. tcg_gen_deposit_i32(d, t1, t2, 0, 16);
  1704. tcg_temp_free_i32(t1);
  1705. tcg_temp_free_i32(t2);
  1706. }
  1707. void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  1708. {
  1709. TCGv_i64 t1 = tcg_temp_ebb_new_i64();
  1710. TCGv_i64 t2 = tcg_temp_ebb_new_i64();
  1711. tcg_gen_andi_i64(t1, a, ~0xffffffffull);
  1712. tcg_gen_add_i64(t2, a, b);
  1713. tcg_gen_add_i64(t1, t1, b);
  1714. tcg_gen_deposit_i64(d, t1, t2, 0, 32);
  1715. tcg_temp_free_i64(t1);
  1716. tcg_temp_free_i64(t2);
  1717. }
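/*
 * Added note: the 16- and 32-bit variants above use a different trick.
 * t2 = a + b has the correct low lane (any carry only pollutes the high
 * lane), while t1 = (a & ~low_mask) + b has the correct high lane (its low
 * half starts as zero, so no carry can cross into the high half).  The
 * deposit then merges the good low lane of t2 into t1.
 */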
  1718. static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
  1719. void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
  1720. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  1721. {
  1722. static const GVecGen3 g[4] = {
  1723. { .fni8 = tcg_gen_vec_add8_i64,
  1724. .fniv = tcg_gen_add_vec,
  1725. .fno = gen_helper_gvec_add8,
  1726. .opt_opc = vecop_list_add,
  1727. .vece = MO_8 },
  1728. { .fni8 = tcg_gen_vec_add16_i64,
  1729. .fniv = tcg_gen_add_vec,
  1730. .fno = gen_helper_gvec_add16,
  1731. .opt_opc = vecop_list_add,
  1732. .vece = MO_16 },
  1733. { .fni4 = tcg_gen_add_i32,
  1734. .fniv = tcg_gen_add_vec,
  1735. .fno = gen_helper_gvec_add32,
  1736. .opt_opc = vecop_list_add,
  1737. .vece = MO_32 },
  1738. { .fni8 = tcg_gen_add_i64,
  1739. .fniv = tcg_gen_add_vec,
  1740. .fno = gen_helper_gvec_add64,
  1741. .opt_opc = vecop_list_add,
  1742. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1743. .vece = MO_64 },
  1744. };
  1745. tcg_debug_assert(vece <= MO_64);
  1746. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  1747. }
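/*
 * Usage sketch (added, with hypothetical offsets): a target front end
 * typically expands a SIMD integer add of 32-bit lanes as
 *
 *     tcg_gen_gvec_add(MO_32, dreg_ofs, areg_ofs, breg_ofs, 16, 16);
 *
 * where the *_ofs values are offsetof()-derived byte offsets of the guest
 * vector registers within CPUState and 16/16 are oprsz/maxsz in bytes.
 */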
  1748. void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
  1749. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  1750. {
  1751. static const GVecGen2s g[4] = {
  1752. { .fni8 = tcg_gen_vec_add8_i64,
  1753. .fniv = tcg_gen_add_vec,
  1754. .fno = gen_helper_gvec_adds8,
  1755. .opt_opc = vecop_list_add,
  1756. .vece = MO_8 },
  1757. { .fni8 = tcg_gen_vec_add16_i64,
  1758. .fniv = tcg_gen_add_vec,
  1759. .fno = gen_helper_gvec_adds16,
  1760. .opt_opc = vecop_list_add,
  1761. .vece = MO_16 },
  1762. { .fni4 = tcg_gen_add_i32,
  1763. .fniv = tcg_gen_add_vec,
  1764. .fno = gen_helper_gvec_adds32,
  1765. .opt_opc = vecop_list_add,
  1766. .vece = MO_32 },
  1767. { .fni8 = tcg_gen_add_i64,
  1768. .fniv = tcg_gen_add_vec,
  1769. .fno = gen_helper_gvec_adds64,
  1770. .opt_opc = vecop_list_add,
  1771. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1772. .vece = MO_64 },
  1773. };
  1774. tcg_debug_assert(vece <= MO_64);
  1775. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
  1776. }
  1777. void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
  1778. int64_t c, uint32_t oprsz, uint32_t maxsz)
  1779. {
  1780. TCGv_i64 tmp = tcg_constant_i64(c);
  1781. tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
  1782. }
  1783. static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };
  1784. void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
  1785. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  1786. {
  1787. static const GVecGen2s g[4] = {
  1788. { .fni8 = tcg_gen_vec_sub8_i64,
  1789. .fniv = tcg_gen_sub_vec,
  1790. .fno = gen_helper_gvec_subs8,
  1791. .opt_opc = vecop_list_sub,
  1792. .vece = MO_8 },
  1793. { .fni8 = tcg_gen_vec_sub16_i64,
  1794. .fniv = tcg_gen_sub_vec,
  1795. .fno = gen_helper_gvec_subs16,
  1796. .opt_opc = vecop_list_sub,
  1797. .vece = MO_16 },
  1798. { .fni4 = tcg_gen_sub_i32,
  1799. .fniv = tcg_gen_sub_vec,
  1800. .fno = gen_helper_gvec_subs32,
  1801. .opt_opc = vecop_list_sub,
  1802. .vece = MO_32 },
  1803. { .fni8 = tcg_gen_sub_i64,
  1804. .fniv = tcg_gen_sub_vec,
  1805. .fno = gen_helper_gvec_subs64,
  1806. .opt_opc = vecop_list_sub,
  1807. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1808. .vece = MO_64 },
  1809. };
  1810. tcg_debug_assert(vece <= MO_64);
  1811. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
  1812. }
  1813. /* Perform a vector subtraction using normal subtraction and a mask.
  1814. Compare gen_addv_mask above. */
  1815. static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
  1816. {
  1817. TCGv_i64 t1 = tcg_temp_ebb_new_i64();
  1818. TCGv_i64 t2 = tcg_temp_ebb_new_i64();
  1819. TCGv_i64 t3 = tcg_temp_ebb_new_i64();
  1820. tcg_gen_or_i64(t1, a, m);
  1821. tcg_gen_andc_i64(t2, b, m);
  1822. tcg_gen_eqv_i64(t3, a, b);
  1823. tcg_gen_sub_i64(d, t1, t2);
  1824. tcg_gen_and_i64(t3, t3, m);
  1825. tcg_gen_xor_i64(d, d, t3);
  1826. tcg_temp_free_i64(t1);
  1827. tcg_temp_free_i64(t2);
  1828. tcg_temp_free_i64(t3);
  1829. }
  1830. void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  1831. {
  1832. TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
  1833. gen_subv_mask(d, a, b, m);
  1834. }
  1835. void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  1836. {
  1837. TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
  1838. TCGv_i32 t1 = tcg_temp_ebb_new_i32();
  1839. TCGv_i32 t2 = tcg_temp_ebb_new_i32();
  1840. TCGv_i32 t3 = tcg_temp_ebb_new_i32();
  1841. tcg_gen_or_i32(t1, a, m);
  1842. tcg_gen_andc_i32(t2, b, m);
  1843. tcg_gen_eqv_i32(t3, a, b);
  1844. tcg_gen_sub_i32(d, t1, t2);
  1845. tcg_gen_and_i32(t3, t3, m);
  1846. tcg_gen_xor_i32(d, d, t3);
  1847. tcg_temp_free_i32(t1);
  1848. tcg_temp_free_i32(t2);
  1849. tcg_temp_free_i32(t3);
  1850. }
  1851. void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  1852. {
  1853. TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
  1854. gen_subv_mask(d, a, b, m);
  1855. }
  1856. void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  1857. {
  1858. TCGv_i32 t1 = tcg_temp_ebb_new_i32();
  1859. TCGv_i32 t2 = tcg_temp_ebb_new_i32();
  1860. tcg_gen_andi_i32(t1, b, ~0xffff);
  1861. tcg_gen_sub_i32(t2, a, b);
  1862. tcg_gen_sub_i32(t1, a, t1);
  1863. tcg_gen_deposit_i32(d, t1, t2, 0, 16);
  1864. tcg_temp_free_i32(t1);
  1865. tcg_temp_free_i32(t2);
  1866. }
  1867. void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  1868. {
  1869. TCGv_i64 t1 = tcg_temp_ebb_new_i64();
  1870. TCGv_i64 t2 = tcg_temp_ebb_new_i64();
  1871. tcg_gen_andi_i64(t1, b, ~0xffffffffull);
  1872. tcg_gen_sub_i64(t2, a, b);
  1873. tcg_gen_sub_i64(t1, a, t1);
  1874. tcg_gen_deposit_i64(d, t1, t2, 0, 32);
  1875. tcg_temp_free_i64(t1);
  1876. tcg_temp_free_i64(t2);
  1877. }
  1878. void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
  1879. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  1880. {
  1881. static const GVecGen3 g[4] = {
  1882. { .fni8 = tcg_gen_vec_sub8_i64,
  1883. .fniv = tcg_gen_sub_vec,
  1884. .fno = gen_helper_gvec_sub8,
  1885. .opt_opc = vecop_list_sub,
  1886. .vece = MO_8 },
  1887. { .fni8 = tcg_gen_vec_sub16_i64,
  1888. .fniv = tcg_gen_sub_vec,
  1889. .fno = gen_helper_gvec_sub16,
  1890. .opt_opc = vecop_list_sub,
  1891. .vece = MO_16 },
  1892. { .fni4 = tcg_gen_sub_i32,
  1893. .fniv = tcg_gen_sub_vec,
  1894. .fno = gen_helper_gvec_sub32,
  1895. .opt_opc = vecop_list_sub,
  1896. .vece = MO_32 },
  1897. { .fni8 = tcg_gen_sub_i64,
  1898. .fniv = tcg_gen_sub_vec,
  1899. .fno = gen_helper_gvec_sub64,
  1900. .opt_opc = vecop_list_sub,
  1901. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1902. .vece = MO_64 },
  1903. };
  1904. tcg_debug_assert(vece <= MO_64);
  1905. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  1906. }
  1907. static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
  1908. void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
  1909. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  1910. {
  1911. static const GVecGen3 g[4] = {
  1912. { .fniv = tcg_gen_mul_vec,
  1913. .fno = gen_helper_gvec_mul8,
  1914. .opt_opc = vecop_list_mul,
  1915. .vece = MO_8 },
  1916. { .fniv = tcg_gen_mul_vec,
  1917. .fno = gen_helper_gvec_mul16,
  1918. .opt_opc = vecop_list_mul,
  1919. .vece = MO_16 },
  1920. { .fni4 = tcg_gen_mul_i32,
  1921. .fniv = tcg_gen_mul_vec,
  1922. .fno = gen_helper_gvec_mul32,
  1923. .opt_opc = vecop_list_mul,
  1924. .vece = MO_32 },
  1925. { .fni8 = tcg_gen_mul_i64,
  1926. .fniv = tcg_gen_mul_vec,
  1927. .fno = gen_helper_gvec_mul64,
  1928. .opt_opc = vecop_list_mul,
  1929. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1930. .vece = MO_64 },
  1931. };
  1932. tcg_debug_assert(vece <= MO_64);
  1933. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  1934. }
  1935. void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
  1936. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  1937. {
  1938. static const GVecGen2s g[4] = {
  1939. { .fniv = tcg_gen_mul_vec,
  1940. .fno = gen_helper_gvec_muls8,
  1941. .opt_opc = vecop_list_mul,
  1942. .vece = MO_8 },
  1943. { .fniv = tcg_gen_mul_vec,
  1944. .fno = gen_helper_gvec_muls16,
  1945. .opt_opc = vecop_list_mul,
  1946. .vece = MO_16 },
  1947. { .fni4 = tcg_gen_mul_i32,
  1948. .fniv = tcg_gen_mul_vec,
  1949. .fno = gen_helper_gvec_muls32,
  1950. .opt_opc = vecop_list_mul,
  1951. .vece = MO_32 },
  1952. { .fni8 = tcg_gen_mul_i64,
  1953. .fniv = tcg_gen_mul_vec,
  1954. .fno = gen_helper_gvec_muls64,
  1955. .opt_opc = vecop_list_mul,
  1956. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  1957. .vece = MO_64 },
  1958. };
  1959. tcg_debug_assert(vece <= MO_64);
  1960. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
  1961. }
  1962. void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
  1963. int64_t c, uint32_t oprsz, uint32_t maxsz)
  1964. {
  1965. TCGv_i64 tmp = tcg_constant_i64(c);
  1966. tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
  1967. }
  1968. void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
  1969. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  1970. {
  1971. static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 };
  1972. static const GVecGen3 g[4] = {
  1973. { .fniv = tcg_gen_ssadd_vec,
  1974. .fno = gen_helper_gvec_ssadd8,
  1975. .opt_opc = vecop_list,
  1976. .vece = MO_8 },
  1977. { .fniv = tcg_gen_ssadd_vec,
  1978. .fno = gen_helper_gvec_ssadd16,
  1979. .opt_opc = vecop_list,
  1980. .vece = MO_16 },
  1981. { .fniv = tcg_gen_ssadd_vec,
  1982. .fno = gen_helper_gvec_ssadd32,
  1983. .opt_opc = vecop_list,
  1984. .vece = MO_32 },
  1985. { .fniv = tcg_gen_ssadd_vec,
  1986. .fno = gen_helper_gvec_ssadd64,
  1987. .opt_opc = vecop_list,
  1988. .vece = MO_64 },
  1989. };
  1990. tcg_debug_assert(vece <= MO_64);
  1991. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  1992. }
  1993. void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
  1994. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  1995. {
  1996. static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 };
  1997. static const GVecGen3 g[4] = {
  1998. { .fniv = tcg_gen_sssub_vec,
  1999. .fno = gen_helper_gvec_sssub8,
  2000. .opt_opc = vecop_list,
  2001. .vece = MO_8 },
  2002. { .fniv = tcg_gen_sssub_vec,
  2003. .fno = gen_helper_gvec_sssub16,
  2004. .opt_opc = vecop_list,
  2005. .vece = MO_16 },
  2006. { .fniv = tcg_gen_sssub_vec,
  2007. .fno = gen_helper_gvec_sssub32,
  2008. .opt_opc = vecop_list,
  2009. .vece = MO_32 },
  2010. { .fniv = tcg_gen_sssub_vec,
  2011. .fno = gen_helper_gvec_sssub64,
  2012. .opt_opc = vecop_list,
  2013. .vece = MO_64 },
  2014. };
  2015. tcg_debug_assert(vece <= MO_64);
  2016. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2017. }
  2018. static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  2019. {
  2020. TCGv_i32 max = tcg_constant_i32(-1);
  2021. tcg_gen_add_i32(d, a, b);
  2022. tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
  2023. }
  2024. static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  2025. {
  2026. TCGv_i64 max = tcg_constant_i64(-1);
  2027. tcg_gen_add_i64(d, a, b);
  2028. tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
  2029. }
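/*
 * Added note: the two integer helpers above implement unsigned saturating
 * addition by detecting overflow after the fact -- if the wrapped sum is
 * unsigned-less-than one of the addends, the addition overflowed and the
 * result is clamped to the all-ones maximum via movcond.
 */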
  2030. void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
  2031. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2032. {
  2033. static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 };
  2034. static const GVecGen3 g[4] = {
  2035. { .fniv = tcg_gen_usadd_vec,
  2036. .fno = gen_helper_gvec_usadd8,
  2037. .opt_opc = vecop_list,
  2038. .vece = MO_8 },
  2039. { .fniv = tcg_gen_usadd_vec,
  2040. .fno = gen_helper_gvec_usadd16,
  2041. .opt_opc = vecop_list,
  2042. .vece = MO_16 },
  2043. { .fni4 = tcg_gen_usadd_i32,
  2044. .fniv = tcg_gen_usadd_vec,
  2045. .fno = gen_helper_gvec_usadd32,
  2046. .opt_opc = vecop_list,
  2047. .vece = MO_32 },
  2048. { .fni8 = tcg_gen_usadd_i64,
  2049. .fniv = tcg_gen_usadd_vec,
  2050. .fno = gen_helper_gvec_usadd64,
  2051. .opt_opc = vecop_list,
  2052. .vece = MO_64 }
  2053. };
  2054. tcg_debug_assert(vece <= MO_64);
  2055. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2056. }
  2057. static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
  2058. {
  2059. TCGv_i32 min = tcg_constant_i32(0);
  2060. tcg_gen_sub_i32(d, a, b);
  2061. tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
  2062. }
  2063. static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
  2064. {
  2065. TCGv_i64 min = tcg_constant_i64(0);
  2066. tcg_gen_sub_i64(d, a, b);
  2067. tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
  2068. }
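/*
 * Added note: likewise for unsigned saturating subtraction -- if a < b the
 * true result would be negative, so movcond clamps it to zero.
 */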
  2069. void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
  2070. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2071. {
  2072. static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 };
  2073. static const GVecGen3 g[4] = {
  2074. { .fniv = tcg_gen_ussub_vec,
  2075. .fno = gen_helper_gvec_ussub8,
  2076. .opt_opc = vecop_list,
  2077. .vece = MO_8 },
  2078. { .fniv = tcg_gen_ussub_vec,
  2079. .fno = gen_helper_gvec_ussub16,
  2080. .opt_opc = vecop_list,
  2081. .vece = MO_16 },
  2082. { .fni4 = tcg_gen_ussub_i32,
  2083. .fniv = tcg_gen_ussub_vec,
  2084. .fno = gen_helper_gvec_ussub32,
  2085. .opt_opc = vecop_list,
  2086. .vece = MO_32 },
  2087. { .fni8 = tcg_gen_ussub_i64,
  2088. .fniv = tcg_gen_ussub_vec,
  2089. .fno = gen_helper_gvec_ussub64,
  2090. .opt_opc = vecop_list,
  2091. .vece = MO_64 }
  2092. };
  2093. tcg_debug_assert(vece <= MO_64);
  2094. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2095. }
  2096. void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
  2097. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2098. {
  2099. static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 };
  2100. static const GVecGen3 g[4] = {
  2101. { .fniv = tcg_gen_smin_vec,
  2102. .fno = gen_helper_gvec_smin8,
  2103. .opt_opc = vecop_list,
  2104. .vece = MO_8 },
  2105. { .fniv = tcg_gen_smin_vec,
  2106. .fno = gen_helper_gvec_smin16,
  2107. .opt_opc = vecop_list,
  2108. .vece = MO_16 },
  2109. { .fni4 = tcg_gen_smin_i32,
  2110. .fniv = tcg_gen_smin_vec,
  2111. .fno = gen_helper_gvec_smin32,
  2112. .opt_opc = vecop_list,
  2113. .vece = MO_32 },
  2114. { .fni8 = tcg_gen_smin_i64,
  2115. .fniv = tcg_gen_smin_vec,
  2116. .fno = gen_helper_gvec_smin64,
  2117. .opt_opc = vecop_list,
  2118. .vece = MO_64 }
  2119. };
  2120. tcg_debug_assert(vece <= MO_64);
  2121. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2122. }
  2123. void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
  2124. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2125. {
  2126. static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 };
  2127. static const GVecGen3 g[4] = {
  2128. { .fniv = tcg_gen_umin_vec,
  2129. .fno = gen_helper_gvec_umin8,
  2130. .opt_opc = vecop_list,
  2131. .vece = MO_8 },
  2132. { .fniv = tcg_gen_umin_vec,
  2133. .fno = gen_helper_gvec_umin16,
  2134. .opt_opc = vecop_list,
  2135. .vece = MO_16 },
  2136. { .fni4 = tcg_gen_umin_i32,
  2137. .fniv = tcg_gen_umin_vec,
  2138. .fno = gen_helper_gvec_umin32,
  2139. .opt_opc = vecop_list,
  2140. .vece = MO_32 },
  2141. { .fni8 = tcg_gen_umin_i64,
  2142. .fniv = tcg_gen_umin_vec,
  2143. .fno = gen_helper_gvec_umin64,
  2144. .opt_opc = vecop_list,
  2145. .vece = MO_64 }
  2146. };
  2147. tcg_debug_assert(vece <= MO_64);
  2148. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2149. }
  2150. void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
  2151. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2152. {
  2153. static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 };
  2154. static const GVecGen3 g[4] = {
  2155. { .fniv = tcg_gen_smax_vec,
  2156. .fno = gen_helper_gvec_smax8,
  2157. .opt_opc = vecop_list,
  2158. .vece = MO_8 },
  2159. { .fniv = tcg_gen_smax_vec,
  2160. .fno = gen_helper_gvec_smax16,
  2161. .opt_opc = vecop_list,
  2162. .vece = MO_16 },
  2163. { .fni4 = tcg_gen_smax_i32,
  2164. .fniv = tcg_gen_smax_vec,
  2165. .fno = gen_helper_gvec_smax32,
  2166. .opt_opc = vecop_list,
  2167. .vece = MO_32 },
  2168. { .fni8 = tcg_gen_smax_i64,
  2169. .fniv = tcg_gen_smax_vec,
  2170. .fno = gen_helper_gvec_smax64,
  2171. .opt_opc = vecop_list,
  2172. .vece = MO_64 }
  2173. };
  2174. tcg_debug_assert(vece <= MO_64);
  2175. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2176. }
  2177. void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
  2178. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2179. {
  2180. static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 };
  2181. static const GVecGen3 g[4] = {
  2182. { .fniv = tcg_gen_umax_vec,
  2183. .fno = gen_helper_gvec_umax8,
  2184. .opt_opc = vecop_list,
  2185. .vece = MO_8 },
  2186. { .fniv = tcg_gen_umax_vec,
  2187. .fno = gen_helper_gvec_umax16,
  2188. .opt_opc = vecop_list,
  2189. .vece = MO_16 },
  2190. { .fni4 = tcg_gen_umax_i32,
  2191. .fniv = tcg_gen_umax_vec,
  2192. .fno = gen_helper_gvec_umax32,
  2193. .opt_opc = vecop_list,
  2194. .vece = MO_32 },
  2195. { .fni8 = tcg_gen_umax_i64,
  2196. .fniv = tcg_gen_umax_vec,
  2197. .fno = gen_helper_gvec_umax64,
  2198. .opt_opc = vecop_list,
  2199. .vece = MO_64 }
  2200. };
  2201. tcg_debug_assert(vece <= MO_64);
  2202. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
  2203. }
  2204. /* Perform a vector negation using normal negation and a mask.
  2205. Compare gen_subv_mask above. */
  2206. static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
  2207. {
  2208. TCGv_i64 t2 = tcg_temp_ebb_new_i64();
  2209. TCGv_i64 t3 = tcg_temp_ebb_new_i64();
  2210. tcg_gen_andc_i64(t3, m, b);
  2211. tcg_gen_andc_i64(t2, b, m);
  2212. tcg_gen_sub_i64(d, m, t2);
  2213. tcg_gen_xor_i64(d, d, t3);
  2214. tcg_temp_free_i64(t2);
  2215. tcg_temp_free_i64(t3);
  2216. }
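/*
 * Added worked identity for gen_negv_mask (with m = per-lane sign-bit mask):
 *
 *     -b == (m - (b & ~m)) ^ (m & ~b)      per lane, modulo the lane width
 *
 * Subtracting only the low bits from m can never borrow across a lane
 * boundary, and the xor fixes up the sign bit for lanes whose input sign
 * bit was clear.
 */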
  2217. void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
  2218. {
  2219. TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
  2220. gen_negv_mask(d, b, m);
  2221. }
  2222. void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
  2223. {
  2224. TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
  2225. gen_negv_mask(d, b, m);
  2226. }
  2227. void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
  2228. {
  2229. TCGv_i64 t1 = tcg_temp_ebb_new_i64();
  2230. TCGv_i64 t2 = tcg_temp_ebb_new_i64();
  2231. tcg_gen_andi_i64(t1, b, ~0xffffffffull);
  2232. tcg_gen_neg_i64(t2, b);
  2233. tcg_gen_neg_i64(t1, t1);
  2234. tcg_gen_deposit_i64(d, t1, t2, 0, 32);
  2235. tcg_temp_free_i64(t1);
  2236. tcg_temp_free_i64(t2);
  2237. }
  2238. void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
  2239. uint32_t oprsz, uint32_t maxsz)
  2240. {
  2241. static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
  2242. static const GVecGen2 g[4] = {
  2243. { .fni8 = tcg_gen_vec_neg8_i64,
  2244. .fniv = tcg_gen_neg_vec,
  2245. .fno = gen_helper_gvec_neg8,
  2246. .opt_opc = vecop_list,
  2247. .vece = MO_8 },
  2248. { .fni8 = tcg_gen_vec_neg16_i64,
  2249. .fniv = tcg_gen_neg_vec,
  2250. .fno = gen_helper_gvec_neg16,
  2251. .opt_opc = vecop_list,
  2252. .vece = MO_16 },
  2253. { .fni4 = tcg_gen_neg_i32,
  2254. .fniv = tcg_gen_neg_vec,
  2255. .fno = gen_helper_gvec_neg32,
  2256. .opt_opc = vecop_list,
  2257. .vece = MO_32 },
  2258. { .fni8 = tcg_gen_neg_i64,
  2259. .fniv = tcg_gen_neg_vec,
  2260. .fno = gen_helper_gvec_neg64,
  2261. .opt_opc = vecop_list,
  2262. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2263. .vece = MO_64 },
  2264. };
  2265. tcg_debug_assert(vece <= MO_64);
  2266. tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
  2267. }
  2268. static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
  2269. {
  2270. TCGv_i64 t = tcg_temp_ebb_new_i64();
  2271. int nbit = 8 << vece;
  2272. /* Create -1 for each negative element. */
  2273. tcg_gen_shri_i64(t, b, nbit - 1);
  2274. tcg_gen_andi_i64(t, t, dup_const(vece, 1));
  2275. tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
  2276. /*
  2277. * Invert (via xor -1) and add one.
2278. * Because of the ordering, the msb is already clear,
2279. * so the add-one never carries into the next element.
  2280. */
  2281. tcg_gen_xor_i64(d, b, t);
  2282. tcg_gen_andi_i64(t, t, dup_const(vece, 1));
  2283. tcg_gen_add_i64(d, d, t);
  2284. tcg_temp_free_i64(t);
  2285. }
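/*
 * Added worked example for gen_absv_mask with vece == MO_8 and a lane value
 * of 0xfe (-2): t = 0xff after the shift/and/mul steps, so d = b ^ t = 0x01,
 * and adding back the low bit of t gives 0x02 == abs(-2).  Positive lanes
 * produce t = 0 and pass through unchanged.
 */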
  2286. static void tcg_gen_vec_abs8_i64(TCGv_i64 d, TCGv_i64 b)
  2287. {
  2288. gen_absv_mask(d, b, MO_8);
  2289. }
  2290. static void tcg_gen_vec_abs16_i64(TCGv_i64 d, TCGv_i64 b)
  2291. {
  2292. gen_absv_mask(d, b, MO_16);
  2293. }
  2294. void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
  2295. uint32_t oprsz, uint32_t maxsz)
  2296. {
  2297. static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 };
  2298. static const GVecGen2 g[4] = {
  2299. { .fni8 = tcg_gen_vec_abs8_i64,
  2300. .fniv = tcg_gen_abs_vec,
  2301. .fno = gen_helper_gvec_abs8,
  2302. .opt_opc = vecop_list,
  2303. .vece = MO_8 },
  2304. { .fni8 = tcg_gen_vec_abs16_i64,
  2305. .fniv = tcg_gen_abs_vec,
  2306. .fno = gen_helper_gvec_abs16,
  2307. .opt_opc = vecop_list,
  2308. .vece = MO_16 },
  2309. { .fni4 = tcg_gen_abs_i32,
  2310. .fniv = tcg_gen_abs_vec,
  2311. .fno = gen_helper_gvec_abs32,
  2312. .opt_opc = vecop_list,
  2313. .vece = MO_32 },
  2314. { .fni8 = tcg_gen_abs_i64,
  2315. .fniv = tcg_gen_abs_vec,
  2316. .fno = gen_helper_gvec_abs64,
  2317. .opt_opc = vecop_list,
  2318. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2319. .vece = MO_64 },
  2320. };
  2321. tcg_debug_assert(vece <= MO_64);
  2322. tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
  2323. }
  2324. void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
  2325. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2326. {
  2327. static const GVecGen3 g = {
  2328. .fni8 = tcg_gen_and_i64,
  2329. .fniv = tcg_gen_and_vec,
  2330. .fno = gen_helper_gvec_and,
  2331. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2332. };
  2333. if (aofs == bofs) {
  2334. tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
  2335. } else {
  2336. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2337. }
  2338. }
  2339. void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
  2340. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2341. {
  2342. static const GVecGen3 g = {
  2343. .fni8 = tcg_gen_or_i64,
  2344. .fniv = tcg_gen_or_vec,
  2345. .fno = gen_helper_gvec_or,
  2346. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2347. };
  2348. if (aofs == bofs) {
  2349. tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
  2350. } else {
  2351. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2352. }
  2353. }
  2354. void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
  2355. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2356. {
  2357. static const GVecGen3 g = {
  2358. .fni8 = tcg_gen_xor_i64,
  2359. .fniv = tcg_gen_xor_vec,
  2360. .fno = gen_helper_gvec_xor,
  2361. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2362. };
  2363. if (aofs == bofs) {
  2364. tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
  2365. } else {
  2366. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2367. }
  2368. }
  2369. void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
  2370. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2371. {
  2372. static const GVecGen3 g = {
  2373. .fni8 = tcg_gen_andc_i64,
  2374. .fniv = tcg_gen_andc_vec,
  2375. .fno = gen_helper_gvec_andc,
  2376. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2377. };
  2378. if (aofs == bofs) {
  2379. tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
  2380. } else {
  2381. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2382. }
  2383. }
  2384. void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
  2385. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2386. {
  2387. static const GVecGen3 g = {
  2388. .fni8 = tcg_gen_orc_i64,
  2389. .fniv = tcg_gen_orc_vec,
  2390. .fno = gen_helper_gvec_orc,
  2391. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2392. };
  2393. if (aofs == bofs) {
  2394. tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
  2395. } else {
  2396. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2397. }
  2398. }
  2399. void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
  2400. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2401. {
  2402. static const GVecGen3 g = {
  2403. .fni8 = tcg_gen_nand_i64,
  2404. .fniv = tcg_gen_nand_vec,
  2405. .fno = gen_helper_gvec_nand,
  2406. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2407. };
  2408. if (aofs == bofs) {
  2409. tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
  2410. } else {
  2411. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2412. }
  2413. }
  2414. void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
  2415. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2416. {
  2417. static const GVecGen3 g = {
  2418. .fni8 = tcg_gen_nor_i64,
  2419. .fniv = tcg_gen_nor_vec,
  2420. .fno = gen_helper_gvec_nor,
  2421. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2422. };
  2423. if (aofs == bofs) {
  2424. tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
  2425. } else {
  2426. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2427. }
  2428. }
  2429. void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
  2430. uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
  2431. {
  2432. static const GVecGen3 g = {
  2433. .fni8 = tcg_gen_eqv_i64,
  2434. .fniv = tcg_gen_eqv_vec,
  2435. .fno = gen_helper_gvec_eqv,
  2436. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2437. };
  2438. if (aofs == bofs) {
  2439. tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
  2440. } else {
  2441. tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
  2442. }
  2443. }
  2444. static const GVecGen2s gop_ands = {
  2445. .fni8 = tcg_gen_and_i64,
  2446. .fniv = tcg_gen_and_vec,
  2447. .fno = gen_helper_gvec_ands,
  2448. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2449. .vece = MO_64
  2450. };
  2451. void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
  2452. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  2453. {
  2454. TCGv_i64 tmp = tcg_temp_ebb_new_i64();
  2455. tcg_gen_dup_i64(vece, tmp, c);
  2456. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
  2457. tcg_temp_free_i64(tmp);
  2458. }
  2459. void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
  2460. int64_t c, uint32_t oprsz, uint32_t maxsz)
  2461. {
  2462. TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
  2463. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
  2464. }
  2465. void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
  2466. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  2467. {
  2468. static GVecGen2s g = {
  2469. .fni8 = tcg_gen_andc_i64,
  2470. .fniv = tcg_gen_andc_vec,
  2471. .fno = gen_helper_gvec_andcs,
  2472. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2473. .vece = MO_64
  2474. };
  2475. TCGv_i64 tmp = tcg_temp_ebb_new_i64();
  2476. tcg_gen_dup_i64(vece, tmp, c);
  2477. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
  2478. tcg_temp_free_i64(tmp);
  2479. }
  2480. static const GVecGen2s gop_xors = {
  2481. .fni8 = tcg_gen_xor_i64,
  2482. .fniv = tcg_gen_xor_vec,
  2483. .fno = gen_helper_gvec_xors,
  2484. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2485. .vece = MO_64
  2486. };
  2487. void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
  2488. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  2489. {
  2490. TCGv_i64 tmp = tcg_temp_ebb_new_i64();
  2491. tcg_gen_dup_i64(vece, tmp, c);
  2492. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
  2493. tcg_temp_free_i64(tmp);
  2494. }
  2495. void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
  2496. int64_t c, uint32_t oprsz, uint32_t maxsz)
  2497. {
  2498. TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
  2499. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
  2500. }
  2501. static const GVecGen2s gop_ors = {
  2502. .fni8 = tcg_gen_or_i64,
  2503. .fniv = tcg_gen_or_vec,
  2504. .fno = gen_helper_gvec_ors,
  2505. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2506. .vece = MO_64
  2507. };
  2508. void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
  2509. TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
  2510. {
  2511. TCGv_i64 tmp = tcg_temp_ebb_new_i64();
  2512. tcg_gen_dup_i64(vece, tmp, c);
  2513. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
  2514. tcg_temp_free_i64(tmp);
  2515. }
  2516. void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
  2517. int64_t c, uint32_t oprsz, uint32_t maxsz)
  2518. {
  2519. TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
  2520. tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
  2521. }
  2522. void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
  2523. {
  2524. uint64_t mask = dup_const(MO_8, 0xff << c);
  2525. tcg_gen_shli_i64(d, a, c);
  2526. tcg_gen_andi_i64(d, d, mask);
  2527. }
  2528. void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
  2529. {
  2530. uint64_t mask = dup_const(MO_16, 0xffff << c);
  2531. tcg_gen_shli_i64(d, a, c);
  2532. tcg_gen_andi_i64(d, d, mask);
  2533. }
  2534. void tcg_gen_vec_shl8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
  2535. {
  2536. uint32_t mask = dup_const(MO_8, 0xff << c);
  2537. tcg_gen_shli_i32(d, a, c);
  2538. tcg_gen_andi_i32(d, d, mask);
  2539. }
  2540. void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
  2541. {
  2542. uint32_t mask = dup_const(MO_16, 0xffff << c);
  2543. tcg_gen_shli_i32(d, a, c);
  2544. tcg_gen_andi_i32(d, d, mask);
  2545. }
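/*
 * Added note: the four shl helpers above shift the whole word and then mask
 * away the bits that crossed a lane boundary; e.g. for MO_8 the mask
 * dup_const(MO_8, 0xff << c) keeps only bits that originated in the same
 * byte.  The shr and sar helpers below follow the same shift-then-mask idea.
 */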
  2546. void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
  2547. int64_t shift, uint32_t oprsz, uint32_t maxsz)
  2548. {
  2549. static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
  2550. static const GVecGen2i g[4] = {
  2551. { .fni8 = tcg_gen_vec_shl8i_i64,
  2552. .fniv = tcg_gen_shli_vec,
  2553. .fno = gen_helper_gvec_shl8i,
  2554. .opt_opc = vecop_list,
  2555. .vece = MO_8 },
  2556. { .fni8 = tcg_gen_vec_shl16i_i64,
  2557. .fniv = tcg_gen_shli_vec,
  2558. .fno = gen_helper_gvec_shl16i,
  2559. .opt_opc = vecop_list,
  2560. .vece = MO_16 },
  2561. { .fni4 = tcg_gen_shli_i32,
  2562. .fniv = tcg_gen_shli_vec,
  2563. .fno = gen_helper_gvec_shl32i,
  2564. .opt_opc = vecop_list,
  2565. .vece = MO_32 },
  2566. { .fni8 = tcg_gen_shli_i64,
  2567. .fniv = tcg_gen_shli_vec,
  2568. .fno = gen_helper_gvec_shl64i,
  2569. .opt_opc = vecop_list,
  2570. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2571. .vece = MO_64 },
  2572. };
  2573. tcg_debug_assert(vece <= MO_64);
  2574. tcg_debug_assert(shift >= 0 && shift < (8 << vece));
  2575. if (shift == 0) {
  2576. tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
  2577. } else {
  2578. tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
  2579. }
  2580. }
  2581. void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
  2582. {
  2583. uint64_t mask = dup_const(MO_8, 0xff >> c);
  2584. tcg_gen_shri_i64(d, a, c);
  2585. tcg_gen_andi_i64(d, d, mask);
  2586. }
  2587. void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
  2588. {
  2589. uint64_t mask = dup_const(MO_16, 0xffff >> c);
  2590. tcg_gen_shri_i64(d, a, c);
  2591. tcg_gen_andi_i64(d, d, mask);
  2592. }
  2593. void tcg_gen_vec_shr8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
  2594. {
  2595. uint32_t mask = dup_const(MO_8, 0xff >> c);
  2596. tcg_gen_shri_i32(d, a, c);
  2597. tcg_gen_andi_i32(d, d, mask);
  2598. }
  2599. void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
  2600. {
  2601. uint32_t mask = dup_const(MO_16, 0xffff >> c);
  2602. tcg_gen_shri_i32(d, a, c);
  2603. tcg_gen_andi_i32(d, d, mask);
  2604. }
  2605. void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
  2606. int64_t shift, uint32_t oprsz, uint32_t maxsz)
  2607. {
  2608. static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
  2609. static const GVecGen2i g[4] = {
  2610. { .fni8 = tcg_gen_vec_shr8i_i64,
  2611. .fniv = tcg_gen_shri_vec,
  2612. .fno = gen_helper_gvec_shr8i,
  2613. .opt_opc = vecop_list,
  2614. .vece = MO_8 },
  2615. { .fni8 = tcg_gen_vec_shr16i_i64,
  2616. .fniv = tcg_gen_shri_vec,
  2617. .fno = gen_helper_gvec_shr16i,
  2618. .opt_opc = vecop_list,
  2619. .vece = MO_16 },
  2620. { .fni4 = tcg_gen_shri_i32,
  2621. .fniv = tcg_gen_shri_vec,
  2622. .fno = gen_helper_gvec_shr32i,
  2623. .opt_opc = vecop_list,
  2624. .vece = MO_32 },
  2625. { .fni8 = tcg_gen_shri_i64,
  2626. .fniv = tcg_gen_shri_vec,
  2627. .fno = gen_helper_gvec_shr64i,
  2628. .opt_opc = vecop_list,
  2629. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2630. .vece = MO_64 },
  2631. };
  2632. tcg_debug_assert(vece <= MO_64);
  2633. tcg_debug_assert(shift >= 0 && shift < (8 << vece));
  2634. if (shift == 0) {
  2635. tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
  2636. } else {
  2637. tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
  2638. }
  2639. }
  2640. void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
  2641. {
  2642. uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
  2643. uint64_t c_mask = dup_const(MO_8, 0xff >> c);
  2644. TCGv_i64 s = tcg_temp_ebb_new_i64();
  2645. tcg_gen_shri_i64(d, a, c);
  2646. tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
  2647. tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
  2648. tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
  2649. tcg_gen_or_i64(d, d, s); /* include sign extension */
  2650. tcg_temp_free_i64(s);
  2651. }
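/*
 * Added note on the sar helpers: after the logical shift, s_mask isolates
 * each lane's (shifted) sign bit and the multiply by (2 << c) - 2 smears it
 * across the c vacated high bits, which is exactly the sign extension an
 * arithmetic shift would have produced.
 */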
  2652. void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
  2653. {
  2654. uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
  2655. uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
  2656. TCGv_i64 s = tcg_temp_ebb_new_i64();
  2657. tcg_gen_shri_i64(d, a, c);
  2658. tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
  2659. tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
  2660. tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
  2661. tcg_gen_or_i64(d, d, s); /* include sign extension */
  2662. tcg_temp_free_i64(s);
  2663. }
  2664. void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
  2665. {
  2666. uint32_t s_mask = dup_const(MO_8, 0x80 >> c);
  2667. uint32_t c_mask = dup_const(MO_8, 0xff >> c);
  2668. TCGv_i32 s = tcg_temp_ebb_new_i32();
  2669. tcg_gen_shri_i32(d, a, c);
  2670. tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
  2671. tcg_gen_muli_i32(s, s, (2 << c) - 2); /* replicate isolated signs */
  2672. tcg_gen_andi_i32(d, d, c_mask); /* clear out bits above sign */
  2673. tcg_gen_or_i32(d, d, s); /* include sign extension */
  2674. tcg_temp_free_i32(s);
  2675. }
  2676. void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
  2677. {
  2678. uint32_t s_mask = dup_const(MO_16, 0x8000 >> c);
  2679. uint32_t c_mask = dup_const(MO_16, 0xffff >> c);
  2680. TCGv_i32 s = tcg_temp_ebb_new_i32();
  2681. tcg_gen_shri_i32(d, a, c);
  2682. tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
  2683. tcg_gen_andi_i32(d, d, c_mask); /* clear out bits above sign */
  2684. tcg_gen_muli_i32(s, s, (2 << c) - 2); /* replicate isolated signs */
  2685. tcg_gen_or_i32(d, d, s); /* include sign extension */
  2686. tcg_temp_free_i32(s);
  2687. }
  2688. void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
  2689. int64_t shift, uint32_t oprsz, uint32_t maxsz)
  2690. {
  2691. static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
  2692. static const GVecGen2i g[4] = {
  2693. { .fni8 = tcg_gen_vec_sar8i_i64,
  2694. .fniv = tcg_gen_sari_vec,
  2695. .fno = gen_helper_gvec_sar8i,
  2696. .opt_opc = vecop_list,
  2697. .vece = MO_8 },
  2698. { .fni8 = tcg_gen_vec_sar16i_i64,
  2699. .fniv = tcg_gen_sari_vec,
  2700. .fno = gen_helper_gvec_sar16i,
  2701. .opt_opc = vecop_list,
  2702. .vece = MO_16 },
  2703. { .fni4 = tcg_gen_sari_i32,
  2704. .fniv = tcg_gen_sari_vec,
  2705. .fno = gen_helper_gvec_sar32i,
  2706. .opt_opc = vecop_list,
  2707. .vece = MO_32 },
  2708. { .fni8 = tcg_gen_sari_i64,
  2709. .fniv = tcg_gen_sari_vec,
  2710. .fno = gen_helper_gvec_sar64i,
  2711. .opt_opc = vecop_list,
  2712. .prefer_i64 = TCG_TARGET_REG_BITS == 64,
  2713. .vece = MO_64 },
  2714. };
  2715. tcg_debug_assert(vece <= MO_64);
  2716. tcg_debug_assert(shift >= 0 && shift < (8 << vece));
  2717. if (shift == 0) {
  2718. tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
  2719. } else {
  2720. tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
  2721. }
  2722. }
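
/*
 * Per-element rotate left by a constant, built from a left shift of the
 * low bits and a right shift of the high bits, each masked so that no
 * bits cross an element boundary.  Note that the input A is clobbered
 * and used as scratch.
 */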
void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff << c);

    tcg_gen_shli_i64(d, a, c);
    tcg_gen_shri_i64(a, a, 8 - c);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(a, a, ~mask);
    tcg_gen_or_i64(d, d, a);
}

void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff << c);

    tcg_gen_shli_i64(d, a, c);
    tcg_gen_shri_i64(a, a, 16 - c);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(a, a, ~mask);
    tcg_gen_or_i64(d, d, a);
}

void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_rotl8i_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_rotl16i_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotli_i32,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotli_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
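
/*
 * Rotate right by a constant is rotate left by the complementary
 * amount: for MO_8, a rotate right by 3 becomes a rotate left by 5,
 * and a rotate by 0 stays 0 thanks to the mask.
 */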
void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    tcg_gen_gvec_rotli(vece, dofs, aofs, -shift & ((8 << vece) - 1),
                       oprsz, maxsz);
}

/*
 * Specialized generation of vector shifts by a non-constant scalar.
 */

typedef struct {
    void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
    void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
    void (*fniv_s)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32);
    void (*fniv_v)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
    gen_helper_gvec_2 *fno[4];
    TCGOpcode s_list[2];
    TCGOpcode v_list[2];
} GVecGen2sh;
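
/*
 * Expand OPRSZ bytes worth of a vector-by-scalar shift, TYSZ bytes at
 * a time, loading from AOFS and storing to DOFS relative to env.
 */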
static void expand_2sh_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t oprsz, uint32_t tysz, TCGType type,
                           TCGv_i32 shift,
                           void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32))
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        fni(vece, t1, t0, shift);
        tcg_gen_st_vec(t1, tcg_env, dofs + i);
    }
}
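
/*
 * Three expansion strategies, in order of preference: a native
 * vector-shift-by-scalar op (s_list), a native vector-shift-by-vector
 * op with the scalar broadcast into a vector (v_list), and finally
 * either an integral per-element loop or an out-of-line helper with
 * the shift count folded into the descriptor.
 */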
static void
do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
               uint32_t oprsz, uint32_t maxsz, const GVecGen2sh *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    /* If the backend has a scalar expansion, great. */
    type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
    if (type) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);

        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2sh_vec(vece, dofs, aofs, some, 32,
                           TCG_TYPE_V256, shift, g->fniv_s);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2sh_vec(vece, dofs, aofs, oprsz, 16,
                           TCG_TYPE_V128, shift, g->fniv_s);
            break;
        case TCG_TYPE_V64:
            expand_2sh_vec(vece, dofs, aofs, oprsz, 8,
                           TCG_TYPE_V64, shift, g->fniv_s);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_swap_vecop_list(hold_list);
        goto clear_tail;
    }

    /* If the backend supports variable vector shifts, also cool. */
    type = choose_vector_type(g->v_list, vece, oprsz, vece == MO_64);
    if (type) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        TCGv_vec v_shift = tcg_temp_new_vec(type);

        if (vece == MO_64) {
            TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(sh64, shift);
            tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
            tcg_temp_free_i64(sh64);
        } else {
            tcg_gen_dup_i32_vec(vece, v_shift, shift);
        }

        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          v_shift, false, g->fniv_v);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2s_vec(vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                          v_shift, false, g->fniv_v);
            break;
        case TCG_TYPE_V64:
            expand_2s_vec(vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                          v_shift, false, g->fniv_v);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(v_shift);
        tcg_swap_vecop_list(hold_list);
        goto clear_tail;
    }

    /* Otherwise fall back to integral... */
    if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(sh64, shift);
        expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
        tcg_temp_free_i64(sh64);
    } else {
        TCGv_ptr a0 = tcg_temp_ebb_new_ptr();
        TCGv_ptr a1 = tcg_temp_ebb_new_ptr();
        TCGv_i32 desc = tcg_temp_ebb_new_i32();

        tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
        tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
        tcg_gen_addi_ptr(a0, tcg_env, dofs);
        tcg_gen_addi_ptr(a1, tcg_env, aofs);

        g->fno[vece](a0, a1, desc);

        tcg_temp_free_ptr(a0);
        tcg_temp_free_ptr(a1);
        tcg_temp_free_i32(desc);
        return;
    }

 clear_tail:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shl_i32,
        .fni8 = tcg_gen_shl_i64,
        .fniv_s = tcg_gen_shls_vec,
        .fniv_v = tcg_gen_shlv_vec,
        .fno = {
            gen_helper_gvec_shl8i,
            gen_helper_gvec_shl16i,
            gen_helper_gvec_shl32i,
            gen_helper_gvec_shl64i,
        },
        .s_list = { INDEX_op_shls_vec, 0 },
        .v_list = { INDEX_op_shlv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shr_i32,
        .fni8 = tcg_gen_shr_i64,
        .fniv_s = tcg_gen_shrs_vec,
        .fniv_v = tcg_gen_shrv_vec,
        .fno = {
            gen_helper_gvec_shr8i,
            gen_helper_gvec_shr16i,
            gen_helper_gvec_shr32i,
            gen_helper_gvec_shr64i,
        },
        .s_list = { INDEX_op_shrs_vec, 0 },
        .v_list = { INDEX_op_shrv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_sar_i32,
        .fni8 = tcg_gen_sar_i64,
        .fniv_s = tcg_gen_sars_vec,
        .fniv_v = tcg_gen_sarv_vec,
        .fno = {
            gen_helper_gvec_sar8i,
            gen_helper_gvec_sar16i,
            gen_helper_gvec_sar32i,
            gen_helper_gvec_sar64i,
        },
        .s_list = { INDEX_op_sars_vec, 0 },
        .v_list = { INDEX_op_sarv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
                        TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_rotl_i32,
        .fni8 = tcg_gen_rotl_i64,
        .fniv_s = tcg_gen_rotls_vec,
        .fniv_v = tcg_gen_rotlv_vec,
        .fno = {
            gen_helper_gvec_rotl8i,
            gen_helper_gvec_rotl16i,
            gen_helper_gvec_rotl32i,
            gen_helper_gvec_rotl64i,
        },
        .s_list = { INDEX_op_rotls_vec, 0 },
        .v_list = { INDEX_op_rotlv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}
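
/*
 * Rotate right by a non-constant scalar: negate the count modulo the
 * element size and reuse the rotate-left expansion above.
 */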
void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs,
                        TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i32 tmp = tcg_temp_ebb_new_i32();

    tcg_gen_neg_i32(tmp, shift);
    tcg_gen_andi_i32(tmp, tmp, (8 << vece) - 1);
    tcg_gen_gvec_rotls(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i32(tmp);
}

/*
 * Expand D = A << (B % element bits)
 *
 * Unlike scalar shifts, where it is easy for the target front end to
 * include the modulo as part of the expansion, there is no convenient
 * way to do so for vectors, so the modulo is applied here.  If the
 * target naturally includes the modulo as part of the operation, great!
 * If the target has some other behaviour for out-of-range shifts, then
 * it could not use this function anyway, and would need to do its own
 * expansion with custom functions.
 */
static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);

    tcg_gen_and_vec(vece, t, b, m);
    tcg_gen_shlv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_ebb_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_shl_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_shl_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shl_mod_i32,
          .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shl_mod_i64,
          .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/*
 * Similarly for logical right shifts.
 */

static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);

    tcg_gen_and_vec(vece, t, b, m);
    tcg_gen_shrv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_ebb_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_shr_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_shr_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shr_mod_i32,
          .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shr_mod_i64,
          .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/*
 * Similarly for arithmetic right shifts.
 */

static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);

    tcg_gen_and_vec(vece, t, b, m);
    tcg_gen_sarv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_ebb_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_sar_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_sar_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sar_mod_i32,
          .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sar_mod_i64,
          .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/*
 * Similarly for rotates.
 */

static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);

    tcg_gen_and_vec(vece, t, b, m);
    tcg_gen_rotlv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_ebb_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_rotl_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_rotl_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotlv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotl_mod_i32,
          .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotl_mod_i64,
          .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);

    tcg_gen_and_vec(vece, t, b, m);
    tcg_gen_rotrv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_ebb_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_rotr_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_rotr_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotrv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotr_mod_i32,
          .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotr_mod_i64,
          .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/* Expand OPRSZ bytes worth of comparison operations using i32 elements. */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i32 t0 = tcg_temp_ebb_new_i32();
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, tcg_env, aofs + i);
        tcg_gen_ld_i32(t1, tcg_env, bofs + i);
        tcg_gen_negsetcond_i32(cond, t0, t0, t1);
        tcg_gen_st_i32(t0, tcg_env, dofs + i);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i64 t0 = tcg_temp_ebb_new_i64();
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, tcg_env, aofs + i);
        tcg_gen_ld_i64(t1, tcg_env, bofs + i);
        tcg_gen_negsetcond_i64(cond, t0, t0, t1);
        tcg_gen_st_i64(t0, tcg_env, dofs + i);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGCond cond)
{
    for (uint32_t i = 0; i < oprsz; i += tysz) {
        TCGv_vec t0 = tcg_temp_new_vec(type);
        TCGv_vec t1 = tcg_temp_new_vec(type);
        TCGv_vec t2 = tcg_temp_new_vec(type);

        tcg_gen_ld_vec(t0, tcg_env, aofs + i);
        tcg_gen_ld_vec(t1, tcg_env, bofs + i);
        tcg_gen_cmp_vec(cond, vece, t2, t0, t1);
        tcg_gen_st_vec(t2, tcg_env, dofs + i);
    }
}
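
/*
 * Expand a comparison of two vectors, producing -1 (all bits set) per
 * true element and 0 per false element.  Out-of-line helpers exist only
 * for EQ/NE/LT/LE/LTU/LEU; the remaining conditions are obtained by
 * swapping the operands and the condition before calling the helper.
 */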
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
    static gen_helper_gvec_3 * const eq_fn[4] = {
        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
        gen_helper_gvec_eq32, gen_helper_gvec_eq64
    };
    static gen_helper_gvec_3 * const ne_fn[4] = {
        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
        gen_helper_gvec_ne32, gen_helper_gvec_ne64
    };
    static gen_helper_gvec_3 * const lt_fn[4] = {
        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
        gen_helper_gvec_lt32, gen_helper_gvec_lt64
    };
    static gen_helper_gvec_3 * const le_fn[4] = {
        gen_helper_gvec_le8, gen_helper_gvec_le16,
        gen_helper_gvec_le32, gen_helper_gvec_le64
    };
    static gen_helper_gvec_3 * const ltu_fn[4] = {
        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
    };
    static gen_helper_gvec_3 * const leu_fn[4] = {
        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
        gen_helper_gvec_leu32, gen_helper_gvec_leu64
    };
    static gen_helper_gvec_3 * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_NE] = ne_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };

    const TCGOpcode *hold_list;
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /*
     * Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    hold_list = tcg_swap_vecop_list(cmp_list);
    type = choose_vector_type(cmp_list, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
        break;
    case TCG_TYPE_V64:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
        break;

    case 0:
        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
        } else {
            gen_helper_gvec_3 * const *fn = fns[cond];

            if (fn == NULL) {
                uint32_t tmp;
                tmp = aofs, aofs = bofs, bofs = tmp;
                cond = tcg_swap_cond(cond);
                fn = fns[cond];
                assert(fn != NULL);
            }
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

static void expand_cmps_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t oprsz, uint32_t tysz, TCGType type,
                            TCGCond cond, TCGv_vec c)
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t1, tcg_env, aofs + i);
        tcg_gen_cmp_vec(cond, vece, t0, t1, c);
        tcg_gen_st_vec(t0, tcg_env, dofs + i);
    }
}
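
/*
 * Expand a comparison of each element against the scalar C.  Helpers
 * exist only for EQ/LT/LE/LTU/LEU; other conditions are handled by
 * inverting the condition and passing the inversion flag to the helper
 * as descriptor data.
 */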
void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
                       uint32_t aofs, TCGv_i64 c,
                       uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
    static gen_helper_gvec_2i * const eq_fn[4] = {
        gen_helper_gvec_eqs8, gen_helper_gvec_eqs16,
        gen_helper_gvec_eqs32, gen_helper_gvec_eqs64
    };
    static gen_helper_gvec_2i * const lt_fn[4] = {
        gen_helper_gvec_lts8, gen_helper_gvec_lts16,
        gen_helper_gvec_lts32, gen_helper_gvec_lts64
    };
    static gen_helper_gvec_2i * const le_fn[4] = {
        gen_helper_gvec_les8, gen_helper_gvec_les16,
        gen_helper_gvec_les32, gen_helper_gvec_les64
    };
    static gen_helper_gvec_2i * const ltu_fn[4] = {
        gen_helper_gvec_ltus8, gen_helper_gvec_ltus16,
        gen_helper_gvec_ltus32, gen_helper_gvec_ltus64
    };
    static gen_helper_gvec_2i * const leu_fn[4] = {
        gen_helper_gvec_leus8, gen_helper_gvec_leus16,
        gen_helper_gvec_leus32, gen_helper_gvec_leus64
    };
    static gen_helper_gvec_2i * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };

    TCGType type;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /*
     * Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    type = choose_vector_type(cmp_list, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    if (type != 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(cmp_list);
        TCGv_vec t_vec = tcg_temp_new_vec(type);
        uint32_t some;

        tcg_gen_dup_i64_vec(vece, t_vec, c);
        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_cmps_vec(vece, dofs, aofs, some, 32,
                            TCG_TYPE_V256, cond, t_vec);
            aofs += some;
            dofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */

        case TCG_TYPE_V128:
            some = QEMU_ALIGN_DOWN(oprsz, 16);
            expand_cmps_vec(vece, dofs, aofs, some, 16,
                            TCG_TYPE_V128, cond, t_vec);
            break;

        case TCG_TYPE_V64:
            some = QEMU_ALIGN_DOWN(oprsz, 8);
            expand_cmps_vec(vece, dofs, aofs, some, 8,
                            TCG_TYPE_V64, cond, t_vec);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(t_vec);
        tcg_swap_vecop_list(hold_list);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        uint32_t i;

        for (i = 0; i < oprsz; i += 8) {
            tcg_gen_ld_i64(t0, tcg_env, aofs + i);
            tcg_gen_negsetcond_i64(cond, t0, t0, c);
            tcg_gen_st_i64(t0, tcg_env, dofs + i);
        }
        tcg_temp_free_i64(t0);
    } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
        uint32_t i;

        tcg_gen_extrl_i64_i32(t1, c);
        for (i = 0; i < oprsz; i += 4) {
            tcg_gen_ld_i32(t0, tcg_env, aofs + i);
            tcg_gen_negsetcond_i32(cond, t0, t0, t1);
            tcg_gen_st_i32(t0, tcg_env, dofs + i);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
        gen_helper_gvec_2i * const *fn = fns[cond];
        bool inv = false;

        if (fn == NULL) {
            cond = tcg_invert_cond(cond);
            fn = fns[cond];
            assert(fn != NULL);
            inv = true;
        }
        tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, inv, fn[vece]);
        return;
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
                       uint32_t aofs, int64_t c,
                       uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_constant_i64(c);
    tcg_gen_gvec_cmps(cond, vece, dofs, aofs, tmp, oprsz, maxsz);
}
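
/*
 * Bitwise select: D = (B & A) | (C & ~A), i.e. each bit of A chooses
 * between the corresponding bits of B (A set) and C (A clear).
 */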
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();

    tcg_gen_and_i64(t, b, a);
    tcg_gen_andc_i64(d, c, a);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs,
                         uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 g = {
        .fni8 = tcg_gen_bitsel_i64,
        .fniv = tcg_gen_bitsel_vec,
        .fno = gen_helper_gvec_bitsel,
    };

    tcg_gen_gvec_4(dofs, aofs, bofs, cofs, oprsz, maxsz, &g);
}