ARMDisassemblerCore.cpp 126 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821
  1. //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file is part of the ARM Disassembler.
  11. // It contains code to represent the core concepts of Builder and DisassembleFP
  12. // to solve the problem of disassembling an ARM instr.
  13. //
  14. //===----------------------------------------------------------------------===//
  15. #define DEBUG_TYPE "arm-disassembler"
  16. #include "ARMDisassemblerCore.h"
  17. #include "ARMAddressingModes.h"
  18. #include "ARMMCExpr.h"
  19. #include "llvm/Support/Debug.h"
  20. #include "llvm/Support/raw_ostream.h"
  21. //#define DEBUG(X) do { X; } while (0)
  22. /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
  23. /// MCInstrDesc ARMInsts[] definition and the MCOperandInfo[]'s describing the
  24. /// operand info for each ARMInsts[i].
  25. ///
  26. /// Together with an instruction's encoding format, we can take advantage of the
  27. /// NumOperands and the OpInfo fields of the target instruction description in
  28. /// the quest to build out the MCOperand list for an MCInst.
  29. ///
  30. /// The general guideline is that with a known format, the number of dst and src
  31. /// operands are well-known. The dst is built first, followed by the src
  32. /// operand(s). The operands not yet used at this point are for the Implicit
  33. /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
  34. /// defined with two components:
  35. ///
  36. /// def pred { // Operand PredicateOperand
  37. /// ValueType Type = OtherVT;
  38. /// string PrintMethod = "printPredicateOperand";
  39. /// string AsmOperandLowerMethod = ?;
  40. /// dag MIOperandInfo = (ops i32imm, CCR);
  41. /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
  42. /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
  43. /// }
  44. ///
  45. /// which is manifested by the MCOperandInfo[] of:
  46. ///
  47. /// { 0, 0|(1<<MCOI::Predicate), 0 },
  48. /// { ARM::CCRRegClassID, 0|(1<<MCOI::Predicate), 0 }
  49. ///
  50. /// So the first predicate MCOperand corresponds to the immediate part of the
  51. /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
  52. /// corresponds to a register kind of ARM::CPSR.
  53. ///
  54. /// For the Defs part, in the simple case of only cc_out:$s, we have:
  55. ///
  56. /// def cc_out { // Operand OptionalDefOperand
  57. /// ValueType Type = OtherVT;
  58. /// string PrintMethod = "printSBitModifierOperand";
  59. /// string AsmOperandLowerMethod = ?;
  60. /// dag MIOperandInfo = (ops CCR);
  61. /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
  62. /// dag DefaultOps = (ops (i32 zero_reg));
  63. /// }
  64. ///
  65. /// which is manifested by the one MCOperandInfo of:
  66. ///
  67. /// { ARM::CCRRegClassID, 0|(1<<MCOI::OptionalDef), 0 }
  68. ///
  69. /// And this maps to one MCOperand with the register kind of ARM::CPSR.
  70. #include "ARMGenInstrInfo.inc"
  71. using namespace llvm;
/// OpcodeName - Return the mnemonic string for the given opcode, looked up in
/// the auto-generated ARMInsts table (see ARMGenInstrInfo.inc above).
/// No bounds checking is performed; Opcode must be a valid ARM opcode.
const char *ARMUtils::OpcodeName(unsigned Opcode) {
return ARMInsts[Opcode].Name;
}
  75. // Return the register enum Based on RegClass and the raw register number.
  76. // FIXME: Auto-gened?
  77. static unsigned
  78. getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
  79. if (RegClassID == ARM::rGPRRegClassID) {
  80. // Check for The register numbers 13 and 15 that are not permitted for many
  81. // Thumb register specifiers.
  82. if (RawRegister == 13 || RawRegister == 15) {
  83. B->SetErr(-1);
  84. return 0;
  85. }
  86. // For this purpose, we can treat rGPR as if it were GPR.
  87. RegClassID = ARM::GPRRegClassID;
  88. }
  89. // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
  90. // A7.3 register encoding
  91. // Qd -> bit[12] == 0
  92. // Qn -> bit[16] == 0
  93. // Qm -> bit[0] == 0
  94. //
  95. // If one of these bits is 1, the instruction is UNDEFINED.
  96. if (RegClassID == ARM::QPRRegClassID && slice(RawRegister, 0, 0) == 1) {
  97. B->SetErr(-1);
  98. return 0;
  99. }
  100. unsigned RegNum =
  101. RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
  102. switch (RegNum) {
  103. default:
  104. break;
  105. case 0:
  106. switch (RegClassID) {
  107. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
  108. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  109. case ARM::DPR_VFP2RegClassID:
  110. return ARM::D0;
  111. case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
  112. case ARM::QPR_VFP2RegClassID:
  113. return ARM::Q0;
  114. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
  115. }
  116. break;
  117. case 1:
  118. switch (RegClassID) {
  119. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
  120. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  121. case ARM::DPR_VFP2RegClassID:
  122. return ARM::D1;
  123. case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
  124. case ARM::QPR_VFP2RegClassID:
  125. return ARM::Q1;
  126. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
  127. }
  128. break;
  129. case 2:
  130. switch (RegClassID) {
  131. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
  132. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  133. case ARM::DPR_VFP2RegClassID:
  134. return ARM::D2;
  135. case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
  136. case ARM::QPR_VFP2RegClassID:
  137. return ARM::Q2;
  138. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
  139. }
  140. break;
  141. case 3:
  142. switch (RegClassID) {
  143. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
  144. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  145. case ARM::DPR_VFP2RegClassID:
  146. return ARM::D3;
  147. case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
  148. case ARM::QPR_VFP2RegClassID:
  149. return ARM::Q3;
  150. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
  151. }
  152. break;
  153. case 4:
  154. switch (RegClassID) {
  155. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
  156. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  157. case ARM::DPR_VFP2RegClassID:
  158. return ARM::D4;
  159. case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
  160. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
  161. }
  162. break;
  163. case 5:
  164. switch (RegClassID) {
  165. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
  166. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  167. case ARM::DPR_VFP2RegClassID:
  168. return ARM::D5;
  169. case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
  170. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
  171. }
  172. break;
  173. case 6:
  174. switch (RegClassID) {
  175. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
  176. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  177. case ARM::DPR_VFP2RegClassID:
  178. return ARM::D6;
  179. case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
  180. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
  181. }
  182. break;
  183. case 7:
  184. switch (RegClassID) {
  185. case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
  186. case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
  187. case ARM::DPR_VFP2RegClassID:
  188. return ARM::D7;
  189. case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
  190. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
  191. }
  192. break;
  193. case 8:
  194. switch (RegClassID) {
  195. case ARM::GPRRegClassID: return ARM::R8;
  196. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
  197. case ARM::QPRRegClassID: return ARM::Q8;
  198. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
  199. }
  200. break;
  201. case 9:
  202. switch (RegClassID) {
  203. case ARM::GPRRegClassID: return ARM::R9;
  204. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
  205. case ARM::QPRRegClassID: return ARM::Q9;
  206. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
  207. }
  208. break;
  209. case 10:
  210. switch (RegClassID) {
  211. case ARM::GPRRegClassID: return ARM::R10;
  212. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
  213. case ARM::QPRRegClassID: return ARM::Q10;
  214. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
  215. }
  216. break;
  217. case 11:
  218. switch (RegClassID) {
  219. case ARM::GPRRegClassID: return ARM::R11;
  220. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
  221. case ARM::QPRRegClassID: return ARM::Q11;
  222. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
  223. }
  224. break;
  225. case 12:
  226. switch (RegClassID) {
  227. case ARM::GPRRegClassID: return ARM::R12;
  228. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
  229. case ARM::QPRRegClassID: return ARM::Q12;
  230. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
  231. }
  232. break;
  233. case 13:
  234. switch (RegClassID) {
  235. case ARM::GPRRegClassID: return ARM::SP;
  236. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
  237. case ARM::QPRRegClassID: return ARM::Q13;
  238. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
  239. }
  240. break;
  241. case 14:
  242. switch (RegClassID) {
  243. case ARM::GPRRegClassID: return ARM::LR;
  244. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
  245. case ARM::QPRRegClassID: return ARM::Q14;
  246. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
  247. }
  248. break;
  249. case 15:
  250. switch (RegClassID) {
  251. case ARM::GPRRegClassID: return ARM::PC;
  252. case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
  253. case ARM::QPRRegClassID: return ARM::Q15;
  254. case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
  255. }
  256. break;
  257. case 16:
  258. switch (RegClassID) {
  259. case ARM::DPRRegClassID: return ARM::D16;
  260. case ARM::SPRRegClassID: return ARM::S16;
  261. }
  262. break;
  263. case 17:
  264. switch (RegClassID) {
  265. case ARM::DPRRegClassID: return ARM::D17;
  266. case ARM::SPRRegClassID: return ARM::S17;
  267. }
  268. break;
  269. case 18:
  270. switch (RegClassID) {
  271. case ARM::DPRRegClassID: return ARM::D18;
  272. case ARM::SPRRegClassID: return ARM::S18;
  273. }
  274. break;
  275. case 19:
  276. switch (RegClassID) {
  277. case ARM::DPRRegClassID: return ARM::D19;
  278. case ARM::SPRRegClassID: return ARM::S19;
  279. }
  280. break;
  281. case 20:
  282. switch (RegClassID) {
  283. case ARM::DPRRegClassID: return ARM::D20;
  284. case ARM::SPRRegClassID: return ARM::S20;
  285. }
  286. break;
  287. case 21:
  288. switch (RegClassID) {
  289. case ARM::DPRRegClassID: return ARM::D21;
  290. case ARM::SPRRegClassID: return ARM::S21;
  291. }
  292. break;
  293. case 22:
  294. switch (RegClassID) {
  295. case ARM::DPRRegClassID: return ARM::D22;
  296. case ARM::SPRRegClassID: return ARM::S22;
  297. }
  298. break;
  299. case 23:
  300. switch (RegClassID) {
  301. case ARM::DPRRegClassID: return ARM::D23;
  302. case ARM::SPRRegClassID: return ARM::S23;
  303. }
  304. break;
  305. case 24:
  306. switch (RegClassID) {
  307. case ARM::DPRRegClassID: return ARM::D24;
  308. case ARM::SPRRegClassID: return ARM::S24;
  309. }
  310. break;
  311. case 25:
  312. switch (RegClassID) {
  313. case ARM::DPRRegClassID: return ARM::D25;
  314. case ARM::SPRRegClassID: return ARM::S25;
  315. }
  316. break;
  317. case 26:
  318. switch (RegClassID) {
  319. case ARM::DPRRegClassID: return ARM::D26;
  320. case ARM::SPRRegClassID: return ARM::S26;
  321. }
  322. break;
  323. case 27:
  324. switch (RegClassID) {
  325. case ARM::DPRRegClassID: return ARM::D27;
  326. case ARM::SPRRegClassID: return ARM::S27;
  327. }
  328. break;
  329. case 28:
  330. switch (RegClassID) {
  331. case ARM::DPRRegClassID: return ARM::D28;
  332. case ARM::SPRRegClassID: return ARM::S28;
  333. }
  334. break;
  335. case 29:
  336. switch (RegClassID) {
  337. case ARM::DPRRegClassID: return ARM::D29;
  338. case ARM::SPRRegClassID: return ARM::S29;
  339. }
  340. break;
  341. case 30:
  342. switch (RegClassID) {
  343. case ARM::DPRRegClassID: return ARM::D30;
  344. case ARM::SPRRegClassID: return ARM::S30;
  345. }
  346. break;
  347. case 31:
  348. switch (RegClassID) {
  349. case ARM::DPRRegClassID: return ARM::D31;
  350. case ARM::SPRRegClassID: return ARM::S31;
  351. }
  352. break;
  353. }
  354. DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
  355. // Encoding error. Mark the builder with error code != 0.
  356. B->SetErr(-1);
  357. return 0;
  358. }
  359. ///////////////////////////////
  360. // //
  361. // Utility Functions //
  362. // //
  363. ///////////////////////////////
// Extract/Decode Rd: Inst{15-12}.
// Shifts the Rd field down by ARMII::RegRdShift and masks to the 4-bit GPR
// register number.
static inline unsigned decodeRd(uint32_t insn) {
return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
}
// Extract/Decode Rn: Inst{19-16}.
// Shifts the Rn field down by ARMII::RegRnShift and masks to the 4-bit GPR
// register number.
static inline unsigned decodeRn(uint32_t insn) {
return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
}
// Extract/Decode Rm: Inst{3-0}.
// Rm lives in the low bits, so only a mask is needed -- no shift.
static inline unsigned decodeRm(uint32_t insn) {
return (insn & ARMII::GPRRegMask);
}
// Extract/Decode Rs: Inst{11-8}.
// Shifts the Rs field down by ARMII::RegRsShift and masks to the 4-bit GPR
// register number.
static inline unsigned decodeRs(uint32_t insn) {
return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
}
// Extract the condition code field.
// Note: no mask is applied -- the shift alone isolates the field, which works
// only if the condition code occupies the most-significant bits of the word
// (assumes ARMII::CondShift == 28, i.e. cond is Inst{31-28}; confirm against
// the ARMII definitions).
static inline unsigned getCondField(uint32_t insn) {
return (insn >> ARMII::CondShift);
}
// Single-bit field extractors.  Each one shifts the instruction word down by
// the corresponding ARMII::*_BitShift amount and masks to one bit.  The bit
// positions themselves are defined in the ARMII namespace (not visible here)
// -- presumably the classic ARM encoding positions (I=Inst{25}, P=Inst{24},
// U=Inst{23}, S=Inst{20}, W=Inst{21}, ...); confirm against ARMII.

// Extract the I (immediate) bit.
static inline unsigned getIBit(uint32_t insn) {
return (insn >> ARMII::I_BitShift) & 1;
}
// Extract the addressing-mode-3 I bit (distinct shift from the plain I bit).
static inline unsigned getAM3IBit(uint32_t insn) {
return (insn >> ARMII::AM3_I_BitShift) & 1;
}
// Extract the P (pre-indexed) bit.
static inline unsigned getPBit(uint32_t insn) {
return (insn >> ARMII::P_BitShift) & 1;
}
// Extract the U (add/subtract offset) bit.
static inline unsigned getUBit(uint32_t insn) {
return (insn >> ARMII::U_BitShift) & 1;
}
// Extract P and U together as a two-bit value: shifts to the U bit but keeps
// two bits, so P lands in bit 1 and U in bit 0 (assumes P is the bit
// immediately above U -- confirm against ARMII).
static inline unsigned getPUBits(uint32_t insn) {
return (insn >> ARMII::U_BitShift) & 3;
}
// Extract the S (set condition codes) bit.
static inline unsigned getSBit(uint32_t insn) {
return (insn >> ARMII::S_BitShift) & 1;
}
// Extract the W (writeback) bit.
static inline unsigned getWBit(uint32_t insn) {
return (insn >> ARMII::W_BitShift) & 1;
}
// Extract the D bit (used by VFP/NEON register encodings).
static inline unsigned getDBit(uint32_t insn) {
return (insn >> ARMII::D_BitShift) & 1;
}
// Extract the N bit (used by VFP/NEON register encodings).
static inline unsigned getNBit(uint32_t insn) {
return (insn >> ARMII::N_BitShift) & 1;
}
// Extract the M bit (used by VFP/NEON register encodings).
static inline unsigned getMBit(uint32_t insn) {
return (insn >> ARMII::M_BitShift) & 1;
}
  413. // See A8.4 Shifts applied to a register.
  414. // A8.4.2 Register controlled shifts.
  415. //
  416. // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
  417. // into llvm enums for shift opcode. The API clients should pass in the value
  418. // encoded with two bits, so the assert stays to signal a wrong API usage.
  419. //
  420. // A8-12: DecodeRegShift()
  421. static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
  422. switch (bits) {
  423. default: assert(0 && "No such value"); return ARM_AM::no_shift;
  424. case 0: return ARM_AM::lsl;
  425. case 1: return ARM_AM::lsr;
  426. case 2: return ARM_AM::asr;
  427. case 3: return ARM_AM::ror;
  428. }
  429. }
  430. // See A8.4 Shifts applied to a register.
  431. // A8.4.1 Constant shifts.
  432. //
  433. // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
  434. // encodings into the intended ShiftOpc and shift amount.
  435. //
  436. // A8-11: DecodeImmShift()
  437. static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
  438. if (ShImm != 0)
  439. return;
  440. switch (ShOp) {
  441. case ARM_AM::no_shift:
  442. case ARM_AM::rrx:
  443. break;
  444. case ARM_AM::lsl:
  445. ShOp = ARM_AM::no_shift;
  446. break;
  447. case ARM_AM::lsr:
  448. case ARM_AM::asr:
  449. ShImm = 32;
  450. break;
  451. case ARM_AM::ror:
  452. ShOp = ARM_AM::rrx;
  453. break;
  454. }
  455. }
  456. // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
  457. // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
  458. // clients should pass in the value encoded with two bits, so the assert stays
  459. // to signal a wrong API usage.
  460. static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
  461. switch (bits) {
  462. default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
  463. case 1: return ARM_AM::ia; // P=0 U=1
  464. case 3: return ARM_AM::ib; // P=1 U=1
  465. case 0: return ARM_AM::da; // P=0 U=0
  466. case 2: return ARM_AM::db; // P=1 U=0
  467. }
  468. }
  469. ////////////////////////////////////////////
  470. // //
  471. // Disassemble function definitions //
  472. // //
  473. ////////////////////////////////////////////
  474. /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
  475. /// instr into a list of MCOperands in the appropriate order, with possible dst,
  476. /// followed by possible src(s).
  477. ///
  478. /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
  479. /// the CPSR, is factored into ARMBasicMCBuilder's method named
  480. /// TryPredicateAndSBitModifier.
// DisassemblePseudo - Pseudo instructions have no machine encoding and should
// never reach the disassembler; flag the error and fail.
static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  assert(0 && "Unexpected pseudo instruction!");
  return false;
}
  486. // A8.6.94 MLA
  487. // if d == 15 || n == 15 || m == 15 || a == 15 then UNPREDICTABLE;
  488. //
  489. // A8.6.105 MUL
  490. // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
  491. //
  492. // A8.6.246 UMULL
  493. // if dLo == 15 || dHi == 15 || n == 15 || m == 15 then UNPREDICTABLE;
  494. // if dHi == dLo then UNPREDICTABLE;
  495. static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
  496. unsigned R19_16 = slice(insn, 19, 16);
  497. unsigned R15_12 = slice(insn, 15, 12);
  498. unsigned R11_8 = slice(insn, 11, 8);
  499. unsigned R3_0 = slice(insn, 3, 0);
  500. switch (Opcode) {
  501. default:
  502. // Did we miss an opcode?
  503. DEBUG(errs() << "BadRegsMulFrm: unexpected opcode!");
  504. return false;
  505. case ARM::MLA: case ARM::MLS: case ARM::SMLABB: case ARM::SMLABT:
  506. case ARM::SMLATB: case ARM::SMLATT: case ARM::SMLAWB: case ARM::SMLAWT:
  507. case ARM::SMMLA: case ARM::SMMLAR: case ARM::SMMLS: case ARM::SMMLSR:
  508. case ARM::USADA8:
  509. if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
  510. return true;
  511. return false;
  512. case ARM::MUL: case ARM::SMMUL: case ARM::SMMULR:
  513. case ARM::SMULBB: case ARM::SMULBT: case ARM::SMULTB: case ARM::SMULTT:
  514. case ARM::SMULWB: case ARM::SMULWT: case ARM::SMUAD: case ARM::SMUADX:
  515. // A8.6.167 SMLAD & A8.6.172 SMLSD
  516. case ARM::SMLAD: case ARM::SMLADX: case ARM::SMLSD: case ARM::SMLSDX:
  517. case ARM::USAD8:
  518. if (R19_16 == 15 || R11_8 == 15 || R3_0 == 15)
  519. return true;
  520. return false;
  521. case ARM::SMLAL: case ARM::SMULL: case ARM::UMAAL: case ARM::UMLAL:
  522. case ARM::UMULL:
  523. case ARM::SMLALBB: case ARM::SMLALBT: case ARM::SMLALTB: case ARM::SMLALTT:
  524. case ARM::SMLALD: case ARM::SMLALDX: case ARM::SMLSLD: case ARM::SMLSLDX:
  525. if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
  526. return true;
  527. if (R19_16 == R15_12)
  528. return true;
  529. return false;;
  530. }
  531. }
  532. // Multiply Instructions.
  533. // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLAR,
// SMMLS, SMMLSR, SMLAD, SMLADX, SMLSD, SMLSDX, and USADA8 (for convenience):
  535. // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
  536. // But note that register checking for {SMLAD, SMLADX, SMLSD, SMLSDX} is
  537. // only for {d, n, m}.
  538. //
  539. // MUL, SMMUL, SMMULR, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT, SMUAD,
  540. // SMUADX, and USAD8 (for convenience):
  541. // Rd{19-16} Rn{3-0} Rm{11-8}
  542. //
  543. // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT,
// SMLALD, SMLALDX, SMLSLD, SMLSLDX:
  545. // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
  546. //
  547. // The mapping of the multiply registers to the "regular" ARM registers, where
  548. // there are convenience decoder functions, is:
  549. //
  550. // Inst{15-12} => Rd
  551. // Inst{19-16} => Rn
  552. // Inst{3-0} => Rm
  553. // Inst{11-8} => Rs
// DisassembleMulFrm - Disassembles a multiply-format instruction into MI:
// destination register(s) first, then the source registers (see the field
// mapping in the comment block above).  Returns false if BadRegsMulFrm()
// rejects the register encodings.
static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  unsigned short NumDefs = MCID.getNumDefs();
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
  assert(NumOps >= 3
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && OpInfo[2].RegClass == ARM::GPRRegClassID
         && "Expect three register operands");

  // Sanity check for the register encodings.
  if (BadRegsMulFrm(Opcode, insn))
    return false;

  // Instructions with two destination registers have RdLo{15-12} first.
  if (NumDefs == 2) {
    assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
           "Expect 4th register operand");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  // The destination register: RdHi{19-16} or Rd{19-16}.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));

  // The two src registers: Rn{3-0}, then Rm{11-8}.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRs(insn))));
  OpIdx += 3;

  // Many multiply instructions (e.g., MLA) have three src registers.
  // The third register operand is Ra{15-12}.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  return true;
}
  596. // Helper routines for disassembly of coprocessor instructions.
  597. static bool LdStCopOpcode(unsigned Opcode) {
  598. if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
  599. (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
  600. return true;
  601. return false;
  602. }
  603. static bool CoprocessorOpcode(unsigned Opcode) {
  604. if (LdStCopOpcode(Opcode))
  605. return true;
  606. switch (Opcode) {
  607. default:
  608. return false;
  609. case ARM::CDP: case ARM::CDP2:
  610. case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
  611. case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
  612. return true;
  613. }
  614. }
// GetCoprocessor - Extracts the coprocessor number from Inst{11-8}.
static inline unsigned GetCoprocessor(uint32_t insn) {
  return slice(insn, 11, 8);
}
// GetCopOpc1 - Extracts the opc1 field: Inst{23-20} for CDP/CDP2 (CDP true,
// the wider form), Inst{23-21} otherwise.
static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
  return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
}
// GetCopOpc2 - Extracts the opc2 field from Inst{7-5}.
static inline unsigned GetCopOpc2(uint32_t insn) {
  return slice(insn, 7, 5);
}
// GetCopOpc - Extracts the single opc field, Inst{7-4}, used by the
// two-register-transfer forms (MCRR/MRRC and variants).
static inline unsigned GetCopOpc(uint32_t insn) {
  return slice(insn, 7, 4);
}
  627. // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
  628. // core registers.
  629. //
  630. // CDP, CDP2: cop opc1 CRd CRn CRm opc2
  631. //
  632. // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
  633. //
// MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
  635. //
  636. // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
  637. // and friends
  638. // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
  639. // and friends
  640. // <-- addrmode2 -->
  641. //
  642. // LDC_OPTION: cop CRd Rn imm8
  643. // and friends
  644. // STC_OPTION: cop CRd Rn imm8
  645. // and friends
  646. //
// DisassembleCoprocessor - Disassembles the coprocessor instructions (see the
// operand summaries in the comment block above).  Most operands are added as
// immediates; only Rd/Rn (where present) are GPRs.  Returns false on
// encoding errors.
static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  assert(NumOps >= 4 && "Num of operands >= 4 for coprocessor instr");

  unsigned &OpIdx = NumOpsAdded;

  // A8.6.92
  // if coproc == '101x' then SEE "Advanced SIMD and VFP"
  // But since the special instructions have more explicit encoding bits
  // specified, if coproc == 10 or 11, we should reject it as invalid.
  unsigned coproc = GetCoprocessor(insn);
  if ((Opcode == ARM::MCR || Opcode == ARM::MCRR ||
       Opcode == ARM::MRC || Opcode == ARM::MRRC) &&
      (coproc == 10 || coproc == 11)) {
    DEBUG(errs() << "Encoding error: coproc == 10 or 11 for MCR[R]/MR[R]C\n");
    return false;
  }

  // Classify the opcode; this drives which operand pattern is emitted below.
  bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
                    Opcode == ARM::MRRC || Opcode == ARM::MRRC2);

  // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
  bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
  bool LdStCop = LdStCopOpcode(Opcode);
  // MRC/MRC2 write Rd, so it is emitted first as the def operand.
  bool RtOut = (Opcode == ARM::MRC || Opcode == ARM::MRC2);

  OpIdx = 0;

  if (RtOut) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }
  // The coprocessor number is common to all forms.
  MI.addOperand(MCOperand::CreateImm(coproc));
  ++OpIdx;

  if (LdStCop) {
    // Unindex if P:W = 0b00 --> _OPTION variant
    unsigned PW = getPBit(insn) << 1 | getWBit(insn);

    // CRd (as an immediate) followed by the base register Rn.
    MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));

    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    OpIdx += 2;

    if (PW) {
      // Indexed form: reg0 placeholder plus an addrmode2 offset built from
      // U (add/sub), imm8:'00', and the index mode from the TSFlags.
      MI.addOperand(MCOperand::CreateReg(0));
      ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
      const MCInstrDesc &MCID = ARMInsts[Opcode];
      unsigned IndexMode =
        (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
      unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
                                          ARM_AM::no_shift, IndexMode);
      MI.addOperand(MCOperand::CreateImm(Offset));
      OpIdx += 2;
    } else {
      // _OPTION variant: the raw 8-bit option field.
      MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
      ++OpIdx;
    }
  } else {
    // CDP/MCR/MRC/MCRR/MRRC family: opc1 (or the single opc for the
    // two-register forms), then the remaining register/immediate fields.
    MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
                                                 : GetCopOpc1(insn, NoGPR)));
    ++OpIdx;

    if (!RtOut) {
      // Rd is a GPR except for CDP/CDP2 where it is CRd (an immediate).
      MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
                          : MCOperand::CreateReg(
                              getRegisterEnum(B, ARM::GPRRegClassID,
                                              decodeRd(insn))));
      ++OpIdx;
    }

    // Rn is a GPR only for the two-register-transfer forms; otherwise CRn.
    MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
                                getRegisterEnum(B, ARM::GPRRegClassID,
                                                decodeRn(insn)))
                            : MCOperand::CreateImm(decodeRn(insn)));

    // CRm is always an immediate.
    MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));

    OpIdx += 2;

    if (!OneCopOpc) {
      MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
      ++OpIdx;
    }
  }

  return true;
}
  721. // Branch Instructions.
  722. // BL: SignExtend(Imm24:'00', 32)
  723. // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
  724. // SMC: ZeroExtend(imm4, 32)
  725. // SVC: ZeroExtend(Imm24, 32)
  726. //
  727. // Various coprocessor instructions are assigned BrFrm arbitrarily.
  728. // Delegates to DisassembleCoprocessor() helper function.
  729. //
  730. // MRS/MRSsys: Rd
  731. // MSR/MSRsys: Rm mask=Inst{19-16}
  732. // BXJ: Rm
  733. // MSRi/MSRsysi: so_imm
  734. // SRSW/SRS: ldstm_mode:$amode mode_imm
  735. // RFEW/RFE: ldstm_mode:$amode Rn
  736. static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  737. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  738. if (CoprocessorOpcode(Opcode))
  739. return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
  740. const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  741. if (!OpInfo) return false;
  742. // MRS and MRSsys take one GPR reg Rd.
  743. if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
  744. assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
  745. "Reg operand expected");
  746. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  747. decodeRd(insn))));
  748. NumOpsAdded = 1;
  749. return true;
  750. }
  751. // BXJ takes one GPR reg Rm.
  752. if (Opcode == ARM::BXJ) {
  753. assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
  754. "Reg operand expected");
  755. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  756. decodeRm(insn))));
  757. NumOpsAdded = 1;
  758. return true;
  759. }
  760. // MSR take a mask, followed by one GPR reg Rm. The mask contains the R Bit in
  761. // bit 4, and the special register fields in bits 3-0.
  762. if (Opcode == ARM::MSR) {
  763. assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
  764. "Reg operand expected");
  765. MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
  766. slice(insn, 19, 16) /* Special Reg */ ));
  767. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  768. decodeRm(insn))));
  769. NumOpsAdded = 2;
  770. return true;
  771. }
  772. // MSRi take a mask, followed by one so_imm operand. The mask contains the
  773. // R Bit in bit 4, and the special register fields in bits 3-0.
  774. if (Opcode == ARM::MSRi) {
  775. // A5.2.11 MSR (immediate), and hints & B6.1.6 MSR (immediate)
  776. // The hints instructions have more specific encodings, so if mask == 0,
  777. // we should reject this as an invalid instruction.
  778. if (slice(insn, 19, 16) == 0)
  779. return false;
  780. MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
  781. slice(insn, 19, 16) /* Special Reg */ ));
  782. // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
  783. // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
  784. // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
  785. unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
  786. unsigned Imm = insn & 0xFF;
  787. MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
  788. NumOpsAdded = 2;
  789. return true;
  790. }
  791. if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
  792. Opcode == ARM::RFEW || Opcode == ARM::RFE) {
  793. ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
  794. MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
  795. if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
  796. MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
  797. else
  798. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  799. decodeRn(insn))));
  800. NumOpsAdded = 3;
  801. return true;
  802. }
  803. assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
  804. || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
  805. "Unexpected Opcode");
  806. assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
  807. int Imm32 = 0;
  808. if (Opcode == ARM::SMC) {
  809. // ZeroExtend(imm4, 32) where imm24 = Inst{3-0}.
  810. Imm32 = slice(insn, 3, 0);
  811. } else if (Opcode == ARM::SVC) {
  812. // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
  813. Imm32 = slice(insn, 23, 0);
  814. } else {
  815. // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
  816. unsigned Imm26 = slice(insn, 23, 0) << 2;
  817. //Imm32 = signextend<signed int, 26>(Imm26);
  818. Imm32 = SignExtend32<26>(Imm26);
  819. }
  820. MI.addOperand(MCOperand::CreateImm(Imm32));
  821. NumOpsAdded = 1;
  822. return true;
  823. }
  824. // Misc. Branch Instructions.
  825. // BX_RET, MOVPCLR
  826. // BLX, BLX_pred, BX, BX_pred
  827. // BLXi
// DisassembleBrMiscFrm - Disassembles the misc. branch instructions:
// BX_RET/MOVPCLR (no explicit operands), BLX/BLX_pred/BX/BX_pred (one GPR
// Rm), and BLXi (one signed PC-relative immediate).  Returns false for any
// other opcode.
static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // BX_RET and MOVPCLR have only two predicate operands; do an early return.
  if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
    return true;

  // BLX and BX take one GPR reg.
  if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
      Opcode == ARM::BX || Opcode == ARM::BX_pred) {
    assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    OpIdx = 1;
    return true;
  }

  // BLXi takes imm32 (the PC offset).
  if (Opcode == ARM::BLXi) {
    assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
    // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
    unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
    int Imm32 = SignExtend32<26>(Imm26);
    MI.addOperand(MCOperand::CreateImm(Imm32));
    OpIdx = 1;
    return true;
  }

  // Unrecognized opcode for this format.
  return false;
}
  859. static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
  860. uint32_t lsb = slice(insn, 11, 7);
  861. uint32_t msb = slice(insn, 20, 16);
  862. uint32_t Val = 0;
  863. if (msb < lsb) {
  864. DEBUG(errs() << "Encoding error: msb < lsb\n");
  865. return false;
  866. }
  867. for (uint32_t i = lsb; i <= msb; ++i)
  868. Val |= (1 << i);
  869. mask = ~Val;
  870. return true;
  871. }
  872. // Standard data-processing instructions allow PC as a register specifier,
  873. // but we should reject other DPFrm instructions with PC as registers.
// BadRegsDPFrm - Returns true (reject) when a non-standard DPFrm instruction
// encodes PC (r15) in Rd/Rn/Rm.  The standard data-processing opcodes listed
// in the case labels legitimately allow PC and always return false.
static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) {
  switch (Opcode) {
  default:
    // Did we miss an opcode?
    if (decodeRd(insn) == 15 || decodeRn(insn) == 15 || decodeRm(insn) == 15) {
      DEBUG(errs() << "DPFrm with bad reg specifier(s)\n");
      return true;
    }
    // Deliberate fallthrough: an unlisted opcode without PC registers is
    // accepted just like the standard opcodes below.
  case ARM::ADCrr: case ARM::ADDSrr: case ARM::ADDrr: case ARM::ANDrr:
  case ARM::BICrr: case ARM::CMNzrr: case ARM::CMPrr: case ARM::EORrr:
  case ARM::ORRrr: case ARM::RSBrr: case ARM::RSCrr: case ARM::SBCrr:
  case ARM::SUBSrr: case ARM::SUBrr: case ARM::TEQrr: case ARM::TSTrr:
    return false;
  }
}
  889. // A major complication is the fact that some of the saturating add/subtract
  890. // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
  891. // They are QADD, QDADD, QDSUB, and QSUB.
// DisassembleDPFrm - Disassembles a data-processing-format instruction:
// optional Rd def, optional Rn src (binary ops), then operand 2 which is
// either a register (reg/reg form), an imm16 (MOVi16/MOVTi16), or a rotated
// so_imm (reg/imm form).  BFC/BFI/SBFX/UBFX get special-cased operand
// layouts.  Returns false on bad register encodings or operand shortfalls.
static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  unsigned short NumDefs = MCID.getNumDefs();
  bool isUnary = isUnaryDP(MCID.TSFlags);
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // Disassemble register def if there is one.
  if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  // Now disassemble the src operands.
  if (OpIdx >= NumOps)
    return false;

  // Special-case handling of BFC/BFI/SBFX/UBFX.
  if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
    // A8.6.17 BFC & A8.6.18 BFI
    // Sanity check Rd.
    if (decodeRd(insn) == 15)
      return false;
    // Placeholder reg0 -- presumably the tied-to first input; verify against
    // the instruction's operand list in the td files.
    MI.addOperand(MCOperand::CreateReg(0));
    if (Opcode == ARM::BFI) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         decodeRm(insn))));
      ++OpIdx;
    }
    uint32_t mask = 0;
    if (!getBFCInvMask(insn, mask))
      return false;

    MI.addOperand(MCOperand::CreateImm(mask));
    OpIdx += 2;
    return true;
  }
  if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
    // Sanity check Rd and Rm.
    if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
      return false;
    // Operands: Rm, lsb (Inst{11-7}), width (Inst{20-16} + 1).
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
    OpIdx += 3;
    return true;
  }

  // The saturating add/subtract ops swap the Rn/Rm field roles.
  bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
               Opcode == ARM::QDSUB || Opcode == ARM::QSUB);

  // BinaryDP has an Rn operand.
  if (!isUnary) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::GPRRegClassID,
                                    RmRn ? decodeRm(insn) : decodeRn(insn))));
    ++OpIdx;
  }

  // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
  if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Now disassemble operand 2.
  if (OpIdx >= NumOps)
    return false;

  if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
    // We have a reg/reg form.
    // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
    // routed here as well.
    // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
    if (BadRegsDPFrm(Opcode, insn))
      return false;
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::GPRRegClassID,
                                    RmRn? decodeRn(insn) : decodeRm(insn))));
    ++OpIdx;
  } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
    // These two instructions don't allow d as 15.
    if (decodeRd(insn) == 15)
      return false;
    // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
    assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
    unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
    if (!B->tryAddingSymbolicOperand(Imm16, 4, MI))
      MI.addOperand(MCOperand::CreateImm(Imm16));
    ++OpIdx;
  } else {
    // We have a reg/imm form.
    // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
    // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
    // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
    assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
    unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
    unsigned Imm = insn & 0xFF;
    MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
    ++OpIdx;
  }

  return true;
}
// DisassembleDPSoRegFrm - Disassembles a data-processing instruction whose
// operand 2 is a shifted register: optional Rd def, optional Rn src, then the
// three-component [Rm, Rs-or-reg0, shift opc+amount] group.  Inst{4}
// distinguishes register-controlled shifts (Rs) from constant shifts.
// Returns false on encoding errors or operand shortfalls.
static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  unsigned short NumDefs = MCID.getNumDefs();
  bool isUnary = isUnaryDP(MCID.TSFlags);
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // Disassemble register def if there is one.
  if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn))));
    ++OpIdx;
  }

  // Disassemble the src operands.
  if (OpIdx >= NumOps)
    return false;

  // BinaryDP has an Rn operand.
  if (!isUnary) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
  if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Disassemble operand 2, which consists of three components.
  if (OpIdx + 2 >= NumOps)
    return false;

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+2].RegClass < 0) &&
         "Expect 3 reg operands");

  // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
  unsigned Rs = slice(insn, 4, 4);

  // Rm is always first in the operand-2 group.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  if (Rs) {
    // If Inst{7} != 0, we should reject this insn as an invalid encoding.
    if (slice(insn, 7, 7))
      return false;

    // A8.6.3 ADC (register-shifted register)
    // if d == 15 || n == 15 || m == 15 || s == 15 then UNPREDICTABLE;
    //
    // This also accounts for shift instructions (register) where, fortunately,
    // Inst{19-16} = 0b0000.
    // A8.6.89 LSL (register)
    // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
    if (decodeRd(insn) == 15 || decodeRn(insn) == 15 ||
        decodeRm(insn) == 15 || decodeRs(insn) == 15)
      return false;

    // Register-controlled shifts: [Rm, Rs, shift].
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRs(insn))));
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
  } else {
    // Constant shifts: [Rm, reg0, shift_imm].
    MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
  }
  OpIdx += 3;

  return true;
}
// BadRegsLdStFrm - Checks a load/store-format instruction against the ARM ARM
// UNPREDICTABLE rules.  Store = true for store forms, WBack = true for
// writeback (pre/post-indexed) forms, Imm = true for the immediate-offset
// (as opposed to register-offset) forms.  Returns true if the encoding
// should be rejected.
static bool BadRegsLdStFrm(unsigned Opcode, uint32_t insn, bool Store, bool WBack,
                           bool Imm) {
  const StringRef Name = ARMInsts[Opcode].Name;
  unsigned Rt = decodeRd(insn);
  unsigned Rn = decodeRn(insn);
  unsigned Rm = decodeRm(insn);
  unsigned P = getPBit(insn);
  unsigned W = getWBit(insn);

  if (Store) {
    // Only STR (immediate, register) allows PC as the source.
    if (Name.startswith("STRB") && Rt == 15) {
      DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
      return true;
    }
    if (WBack && (Rn == 15 || Rn == Rt)) {
      DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
      return true;
    }
    if (!Imm && Rm == 15) {
      DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
      return true;
    }
  } else {
    // Only LDR (immediate, register) allows PC as the destination.
    if (Name.startswith("LDRB") && Rt == 15) {
      DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
      return true;
    }
    if (Imm) {
      // Immediate
      if (Rn == 15) {
        // The literal form must be in offset mode; it's an encoding error
        // otherwise.
        if (!(P == 1 && W == 0)) {
          DEBUG(errs() << "Ld literal form with !(P == 1 && W == 0)\n");
          return true;
        }
        // LDRB (literal) does not allow PC as the destination.
        if (Opcode != ARM::LDRi12 && Rt == 15) {
          DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
          return true;
        }
      } else {
        // Write back while Rn == Rt does not make sense.
        if (WBack && (Rn == Rt)) {
          DEBUG(errs() << "if wback && n == t then UNPREDICTABLE\n");
          return true;
        }
      }
    } else {
      // Register
      if (Rm == 15) {
        DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
        return true;
      }
      if (WBack && (Rn == 15 || Rn == Rt)) {
        DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
        return true;
      }
    }
  }
  return false;
}
// DisassembleLdStFrm - Shared worker for the load/store word/byte formats:
// optional writeback operand, dst/src register Rt, base register Rn, then
// either an immediate offset (I bit == 0) or a shifted register offset
// (I bit == 1).  Returns false on encoding errors or operand shortfalls.
static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  bool isPrePost = isPrePostLdSt(MCID.TSFlags);
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  assert(((!isStore && MCID.getNumDefs() > 0) ||
          (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
         && "Invalid arguments");

  // Operand 0 of a pre- and post-indexed store is the address base writeback.
  if (isPrePost && isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the dst/src operand.
  if (OpIdx >= NumOps)
    return false;

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  // After dst of a pre- and post-indexed load is the address base writeback.
  if (isPrePost && !isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the base operand.
  if (OpIdx >= NumOps)
    return false;

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
         && "Index mode or tied_to operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  ++OpIdx;

  // For reg/reg form, base reg is followed by +/- reg shift imm.
  // For immediate form, it is followed by +/- imm12.
  // See also ARMAddressingModes.h (Addressing Mode #2).
  if (OpIdx + 1 >= NumOps)
    return false;

  if (BadRegsLdStFrm(Opcode, insn, isStore, isPrePost, getIBit(insn)==0))
    return false;

  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  unsigned IndexMode =
    (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
  if (getIBit(insn) == 0) {
    // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
    // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
    // been populated.
    if (isPrePost) {
      MI.addOperand(MCOperand::CreateReg(0));
      OpIdx += 1;
    }

    unsigned Imm12 = slice(insn, 11, 0);
    if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
        Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
      // Disassemble the 12-bit immediate offset, which is the second operand in
      // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
      int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
      MI.addOperand(MCOperand::CreateImm(Offset));
    } else {
      // Disassemble the 12-bit immediate offset, which is the second operand in
      // $am2offset => (ops GPR, i32imm).
      unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
                                          IndexMode);
      MI.addOperand(MCOperand::CreateImm(Offset));
    }
    OpIdx += 1;
  } else {
    // If Inst{25} = 1 and Inst{4} != 0, we should reject this as invalid.
    if (slice(insn,4,4) == 1)
      return false;

    // Disassemble the offset reg (Rm), shift type, and immediate shift length.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
    OpIdx += 2;
  }

  return true;
}
  1227. static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1228. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1229. return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
  1230. }
  1231. static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1232. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1233. return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
  1234. }
  1235. static bool HasDualReg(unsigned Opcode) {
  1236. switch (Opcode) {
  1237. default:
  1238. return false;
  1239. case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
  1240. case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
  1241. return true;
  1242. }
  1243. }
// Disassembles the misc. load/store formats that use Addressing Mode #3
// (see ARMAddressingModes.h); the dual-register LDRD/STRD family is detected
// via HasDualReg().  Operands are appended in this order:
// [base writeback (pre/post store)], dst/src Rd, [Rd+1 for dual-reg],
// [base writeback (pre/post load)], base Rn, then the AM3 offset pair:
// either (+/- Rm, opc) or (reg0, opc with imm4H:imm4L immediate).
static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                   unsigned short NumOps, unsigned &NumOpsAdded,
                                   bool isStore, BO B) {
  const MCInstrDesc &MCID = ARMInsts[Opcode];
  bool isPrePost = isPrePostLdSt(MCID.TSFlags);
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  assert(((!isStore && MCID.getNumDefs() > 0) ||
          (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
         && "Invalid arguments");

  // Operand 0 of a pre- and post-indexed store is the address base writeback.
  if (isPrePost && isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the dst/src operand.
  if (OpIdx >= NumOps)
    return false;
  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  // Fill in LDRD and STRD's second operand Rt operand.
  // The second register of the pair is implicitly Rd+1.
  if (HasDualReg(Opcode)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn) + 1)));
    ++OpIdx;
  }

  // After dst of a pre- and post-indexed load is the address base writeback.
  if (isPrePost && !isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  // Disassemble the base operand.
  if (OpIdx >= NumOps)
    return false;
  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
         && "Offset mode or tied_to operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  ++OpIdx;

  // For reg/reg form, base reg is followed by +/- reg.
  // For immediate form, it is followed by +/- imm8.
  // See also ARMAddressingModes.h (Addressing Mode #3).
  if (OpIdx + 1 >= NumOps)
    return false;
  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  unsigned IndexMode =
    (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
  if (getAM3IBit(insn) == 1) {
    // Immediate-offset form: no offset register, so add a placeholder reg0.
    MI.addOperand(MCOperand::CreateReg(0));

    // Disassemble the 8-bit immediate offset, split as imm4H:imm4L.
    unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
    unsigned Imm4L = insn & 0xF;
    unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
                                        IndexMode);
    MI.addOperand(MCOperand::CreateImm(Offset));
  } else {
    // Disassemble the offset reg (Rm).
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0, IndexMode);
    MI.addOperand(MCOperand::CreateImm(Offset));
  }
  OpIdx += 2;

  return true;
}
  1324. static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1325. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1326. return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
  1327. B);
  1328. }
  1329. static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1330. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1331. return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
  1332. }
// The algorithm for disassembly of LdStMulFrm is different from others because
// it explicitly populates the two predicate operands after the base register.
// After that, we need to populate the reglist with each affected register
// encoded as an MCOperand.
static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                  unsigned short NumOps, unsigned &NumOpsAdded,
                                  BO B) {
  assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
  NumOpsAdded = 0;

  unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));

  // Writeback to base, if necessary.
  // The _UPD variants define the updated base register as operand 0.
  if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
      Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
      Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
      Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
    MI.addOperand(MCOperand::CreateReg(Base));
    ++NumOpsAdded;
  }

  // Add the base register operand.
  MI.addOperand(MCOperand::CreateReg(Base));

  // Handling the two predicate operands before the reglist.
  // A condition field of 0b1111 is not a valid predicate; reject it.
  int64_t CondVal = getCondField(insn);
  if (CondVal == 0xF)
    return false;
  MI.addOperand(MCOperand::CreateImm(CondVal));
  MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
  NumOpsAdded += 3;

  // Fill the variadic part of reglist.
  // Inst{15-0} is a bitmask: bit i set => register i is in the list.
  unsigned RegListBits = insn & ((1 << 16) - 1);
  for (unsigned i = 0; i < 16; ++i) {
    if ((RegListBits >> i) & 1) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         i)));
      ++NumOpsAdded;
    }
  }

  return true;
}
// LDREX, LDREXB, LDREXH: Rd Rn
// LDREXD: Rd Rd+1 Rn
// STREX, STREXB, STREXH: Rd Rm Rn
// STREXD: Rd Rm Rm+1 Rn
//
// SWP, SWPB: Rd Rm Rn
//
// Builds operands for the load/store-exclusive (and swap) family.  Inst{20}
// distinguishes loads (1) from stores/swaps (0); the doubleword variants add
// an implicit consecutive register (Rd+1 or Rm+1).
static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                 unsigned short NumOps, unsigned &NumOpsAdded,
                                 BO B) {
  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  assert(NumOps >= 2
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && "Expect 2 reg operands");

  bool isStore = slice(insn, 20, 20) == 0;
  bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);

  // Add the destination operand.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  // Store register Exclusive needs a source operand.
  if (isStore) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRm(insn))));
    ++OpIdx;
    if (isDW) {
      // STREXD's second source register is implicitly Rm+1.
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         decodeRm(insn)+1)));
      ++OpIdx;
    }
  } else if (isDW) {
    // LDREXD's second destination register is implicitly Rd+1.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRd(insn)+1)));
    ++OpIdx;
  }

  // Finally add the pointer operand.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  ++OpIdx;

  return true;
}
// Misc. Arithmetic Instructions.
// CLZ: Rd Rm
// PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
// RBIT, REV, REV16, REVSH: Rd Rm
//
// Emits Rd [Rn] Rm, plus an so_reg-style LSL/ASR shift immediate for the
// PKHBT/PKHTB forms.
static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                    unsigned short NumOps,
                                    unsigned &NumOpsAdded, BO B) {
  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  assert(NumOps >= 2
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && "Expect 2 reg operands");

  // The three-register form additionally reads Rn.
  bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;

  // Sanity check the registers, which should not be 15.
  if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
    return false;
  if (ThreeReg && decodeRn(insn) == 15)
    return false;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  if (ThreeReg) {
    assert(NumOps >= 4 && "Expect >= 4 operands");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  ++OpIdx;

  // If there is still an operand info left which is an immediate operand, add
  // an additional imm5 LSL/ASR operand.
  if (ThreeReg && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Extract the 5-bit immediate field Inst{11-7}.
    unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
    ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
    if (Opcode == ARM::PKHBT)
      Opc = ARM_AM::lsl;
    else if (Opcode == ARM::PKHTB)
      Opc = ARM_AM::asr;
    // Normalize the special encoding of a zero shift amount (see the A8.4.1
    // note at the other getImmShiftSE call site).
    getImmShiftSE(Opc, ShiftAmt);
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
    ++OpIdx;
  }
  return true;
}
/// DisassembleSatFrm - Disassemble saturate instructions:
/// SSAT, SSAT16, USAT, and USAT16.
///
/// Emits: Rd, saturate-to position, Rn (held in the Rm bit field), and for
/// the 4-operand forms an so_reg-style shift immediate.
static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                              unsigned short NumOps, unsigned &NumOpsAdded,
                              BO B) {
  // A8.6.183 SSAT
  // if d == 15 || n == 15 then UNPREDICTABLE;
  if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
    return false;

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands

  // Disassemble register def.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));

  // Inst{20-16} holds the saturate position; the signed variants (SSAT,
  // SSAT16) encode it minus one, so compensate (A8.6.183).
  unsigned Pos = slice(insn, 20, 16);
  if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
    Pos += 1;
  MI.addOperand(MCOperand::CreateImm(Pos));

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));

  if (NumOpsAdded == 4) {
    // Inst{6} selects the shift type: 1 => ASR, 0 => LSL.
    ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShAmt = slice(insn, 11, 7);
    if (ShAmt == 0) {
      // A8.6.183. Possible ASR shift amount of 32...
      if (Opc == ARM_AM::asr)
        ShAmt = 32;
      else
        Opc = ARM_AM::no_shift;
    }
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
  }
  return true;
}
// Extend instructions.
// SXT* and UXT*: Rd [Rn] Rm [rot_imm].
// The 2nd operand register is Rn and the 3rd operand register is Rm for the
// three register operand form.  Otherwise, Rn=0b1111 and only Rm is used.
static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                              unsigned short NumOps, unsigned &NumOpsAdded,
                              BO B) {
  // A8.6.220 SXTAB
  // if d == 15 || m == 15 then UNPREDICTABLE;
  if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
    return false;

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  assert(NumOps >= 2
         && OpInfo[0].RegClass == ARM::GPRRegClassID
         && OpInfo[1].RegClass == ARM::GPRRegClassID
         && "Expect 2 reg operands");

  // The three-register (accumulate) form additionally reads Rn.
  bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  ++OpIdx;

  if (ThreeReg) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       decodeRn(insn))));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRm(insn))));
  ++OpIdx;

  // If there is still an operand info left which is an immediate operand, add
  // an additional rotate immediate operand.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Extract the 2-bit rotate field Inst{11-10}.
    unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
    // Rotation by 8, 16, or 24 bits.
    MI.addOperand(MCOperand::CreateImm(rot << 3));
    ++OpIdx;
  }

  return true;
}
  1536. /////////////////////////////////////
  1537. // //
  1538. // Utility Functions For VFP //
  1539. // //
  1540. /////////////////////////////////////
  1541. // Extract/Decode Dd/Sd:
  1542. //
  1543. // SP => d = UInt(Vd:D)
  1544. // DP => d = UInt(D:Vd)
  1545. static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
  1546. return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
  1547. : (decodeRd(insn) | getDBit(insn) << 4);
  1548. }
  1549. // Extract/Decode Dn/Sn:
  1550. //
  1551. // SP => n = UInt(Vn:N)
  1552. // DP => n = UInt(N:Vn)
  1553. static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
  1554. return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
  1555. : (decodeRn(insn) | getNBit(insn) << 4);
  1556. }
  1557. // Extract/Decode Dm/Sm:
  1558. //
  1559. // SP => m = UInt(Vm:M)
  1560. // DP => m = UInt(M:Vm)
  1561. static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
  1562. return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
  1563. : (decodeRm(insn) | getMBit(insn) << 4);
  1564. }
  1565. // A7.5.1
  1566. static APInt VFPExpandImm(unsigned char byte, unsigned N) {
  1567. assert(N == 32 || N == 64);
  1568. uint64_t Result;
  1569. unsigned bit6 = slice(byte, 6, 6);
  1570. if (N == 32) {
  1571. Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
  1572. if (bit6)
  1573. Result |= 0x1f << 25;
  1574. else
  1575. Result |= 0x1 << 30;
  1576. } else {
  1577. Result = (uint64_t)slice(byte, 7, 7) << 63 |
  1578. (uint64_t)slice(byte, 5, 0) << 48;
  1579. if (bit6)
  1580. Result |= 0xffULL << 54;
  1581. else
  1582. Result |= 0x1ULL << 62;
  1583. }
  1584. return APInt(N, Result);
  1585. }
// VFP Unary Format Instructions:
//
// VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
// VCVTDS, VCVTSD: converts between double-precision and single-precision
// The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                   unsigned short NumOps, unsigned &NumOpsAdded,
                                   BO B) {
  assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // Destination register; its reg class (SPR vs DPR) selects the decoding.
  unsigned RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  bool isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
  ++OpIdx;

  // Early return for compare with zero instructions.
  if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
      || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
    return true;

  // Source register; re-read the reg class since it may differ from the
  // destination's (the VCVTDS/VCVTSD precision conversions).
  RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
  ++OpIdx;

  return true;
}
// All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
// Some of them have operand constraints which tie the first operand in the
// InOperandList to that of the dst.  As far as asm printing is concerned, this
// tied_to operand is simply skipped.
static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                    unsigned short NumOps,
                                    unsigned &NumOpsAdded, BO B) {
  assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // All registers share one reg class (SPR or DPR); take it from operand 0.
  unsigned RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  bool isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
  ++OpIdx;

  // Skip tied_to operand constraint.
  // A placeholder reg0 keeps operand indices aligned with the MCID.
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    assert(NumOps >= 4 && "Expect >=4 operands");
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
  ++OpIdx;

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
  ++OpIdx;

  return true;
}
// A8.6.295 vcvt (floating-point <-> integer)
// Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
// FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
//
// A8.6.297 vcvt (floating-point and fixed-point)
// Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                   unsigned short NumOps, unsigned &NumOpsAdded,
                                   BO B) {
  assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
  bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
  unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  if (fixed_point) {
    // A8.6.297
    assert(NumOps >= 3 && "Expect >= 3 operands");
    int size = slice(insn, 7, 7) == 0 ? 16 : 32;
    // fbits = size - UInt(imm4:i), imm4 = Inst{3-0}, i = Inst{5}.
    int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, RegClassID,
                                    decodeVFPRd(insn, SP))));

    // The source register is tied to the destination; duplicate operand 0.
    assert(MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
           "Tied to operand expected");
    MI.addOperand(MI.getOperand(0));

    assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
           !OpInfo[2].isOptionalDef() && "Imm operand expected");
    MI.addOperand(MCOperand::CreateImm(fbits));

    NumOpsAdded = 3;
  } else {
    // A8.6.295
    // The Rd (destination) and Rm (source) bits have different interpretations
    // depending on their single-precisonness.
    unsigned d, m;
    if (slice(insn, 18, 18) == 1) { // to_integer operation
      // The integer side is always a single-precision register.
      d = decodeVFPRd(insn, true /* Is Single Precision */);
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, ARM::SPRRegClassID, d)));
      m = decodeVFPRm(insn, SP);
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
    } else {
      d = decodeVFPRd(insn, SP);
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
      m = decodeVFPRm(insn, true /* Is Single Precision */);
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, ARM::SPRRegClassID, m)));
    }
    NumOpsAdded = 2;
  }

  return true;
}
  1701. // VMOVRS - A8.6.330
  1702. // Rt => Rd; Sn => UInt(Vn:N)
  1703. static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1704. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1705. assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
  1706. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  1707. decodeRd(insn))));
  1708. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
  1709. decodeVFPRn(insn, true))));
  1710. NumOpsAdded = 2;
  1711. return true;
  1712. }
// VMOVRRD - A8.6.332
// Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
//
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                   unsigned short NumOps, unsigned &NumOpsAdded,
                                   BO B) {
  assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // The two destination core registers Rt and Rt2 come first.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  OpIdx = 2;

  if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
    // Single-precision form: the consecutive pair Sm, Sm+1.
    unsigned Sm = decodeVFPRm(insn, true);
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm)));
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm+1)));
    OpIdx += 2;
  } else {
    // Double-precision form: a single Dm register.
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::DPRRegClassID,
                                    decodeVFPRm(insn, false))));
    ++OpIdx;
  }
  return true;
}
  1743. // VMOVSR - A8.6.330
  1744. // Rt => Rd; Sn => UInt(Vn:N)
  1745. static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1746. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1747. assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
  1748. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
  1749. decodeVFPRn(insn, true))));
  1750. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  1751. decodeRd(insn))));
  1752. NumOpsAdded = 2;
  1753. return true;
  1754. }
// VMOVDRR - A8.6.332
// Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
//
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                   unsigned short NumOps, unsigned &NumOpsAdded,
                                   BO B) {
  assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // The VFP destination(s) come first: either the pair Sm, Sm+1 or one Dm.
  if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
    unsigned Sm = decodeVFPRm(insn, true);
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm)));
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
                                                       Sm+1)));
    OpIdx += 2;
  } else {
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, ARM::DPRRegClassID,
                                    decodeVFPRm(insn, false))));
    ++OpIdx;
  }

  // Then the two source core registers Rt and Rt2.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRd(insn))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                     decodeRn(insn))));
  OpIdx += 2;

  return true;
}
  1786. // VFP Load/Store Instructions.
  1787. // VLDRD, VLDRS, VSTRD, VSTRS
  1788. static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  1789. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  1790. assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
  1791. bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
  1792. unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
  1793. // Extract Dd/Sd for operand 0.
  1794. unsigned RegD = decodeVFPRd(insn, isSPVFP);
  1795. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
  1796. unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
  1797. MI.addOperand(MCOperand::CreateReg(Base));
  1798. // Next comes the AM5 Opcode.
  1799. ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  1800. unsigned char Imm8 = insn & 0xFF;
  1801. MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
  1802. NumOpsAdded = 3;
  1803. return true;
  1804. }
// VFP Load/Store Multiple Instructions.
// We have an optional write back reg, the base, and two predicate operands.
// It is then followed by a reglist of either DPR(s) or SPR(s).
//
// VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                     unsigned short NumOps,
                                     unsigned &NumOpsAdded, BO B) {
  assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));

  // Writeback to base, if necessary.
  // The _UPD variants define the updated base register as operand 0.
  if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
      Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
      Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
      Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
    MI.addOperand(MCOperand::CreateReg(Base));
    ++OpIdx;
  }

  MI.addOperand(MCOperand::CreateReg(Base));

  // Handling the two predicate operands before the reglist.
  // A condition field of 0b1111 is not a valid predicate; reject it.
  int64_t CondVal = getCondField(insn);
  if (CondVal == 0xF)
    return false;
  MI.addOperand(MCOperand::CreateImm(CondVal));
  MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
  OpIdx += 3;

  bool isSPVFP = (Opcode == ARM::VLDMSIA ||
                  Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
                  Opcode == ARM::VSTMSIA ||
                  Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
  unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  // Extract Dd/Sd.
  unsigned RegD = decodeVFPRd(insn, isSPVFP);

  // Fill the variadic part of reglist.
  // Inst{7-0} counts SPR registers directly, or twice the number of DPRs.
  unsigned char Imm8 = insn & 0xFF;
  unsigned Regs = isSPVFP ? Imm8 : Imm8/2;

  // Apply some sanity checks before proceeding.
  // Reject an empty list, a list running past register 31, or more than
  // 16 double-precision registers.
  if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
    return false;

  for (unsigned i = 0; i < Regs; ++i) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
                                                       RegD + i)));
    ++OpIdx;
  }

  return true;
}
// Misc. VFP Instructions.
// FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
// FCONSTD (DPR and a VFPf64Imm operand)
// FCONSTS (SPR and a VFPf32Imm operand)
// VMRS/VMSR (GPR operand)
static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
                                  unsigned short NumOps, unsigned &NumOpsAdded,
                                  BO B) {
  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // FMSTAT adds no explicit operands at all.
  if (Opcode == ARM::FMSTAT)
    return true;

  assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");

  // Operand 0's register class decides which register file Rd decodes into.
  unsigned RegEnum = 0;
  switch (OpInfo[0].RegClass) {
  case ARM::DPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
    break;
  case ARM::SPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
    break;
  case ARM::GPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
    break;
  default:
    assert(0 && "Invalid reg class id");
    return false;
  }

  MI.addOperand(MCOperand::CreateReg(RegEnum));
  ++OpIdx;

  // Extract/decode the f64/f32 immediate.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // The asm syntax specifies the floating point value, not the 8-bit literal.
    // The literal is split across Inst{19-16} (high) and Inst{3-0} (low) and
    // expanded to a full FP bit pattern by VFPExpandImm.
    APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
                                Opcode == ARM::FCONSTD ? 64 : 32);
    APFloat immFP = APFloat(immRaw, true);
    double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
      immFP.convertToFloat();
    MI.addOperand(MCOperand::CreateFPImm(imm));
    ++OpIdx;
  }

  return true;
}
  1896. // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
  1897. #include "ThumbDisassemblerCore.h"
  1898. /////////////////////////////////////////////////////
  1899. // //
  1900. // Utility Functions For ARM Advanced SIMD //
  1901. // //
  1902. /////////////////////////////////////////////////////
  1903. // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
  1904. // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
  1905. // A7.3 Register encoding
  1906. // Extract/Decode NEON D/Vd:
  1907. //
  1908. // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
  1909. // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
  1910. // handling it in the getRegisterEnum() utility function.
  1911. // D = Inst{22}, Vd = Inst{15-12}
  1912. static unsigned decodeNEONRd(uint32_t insn) {
  1913. return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
  1914. | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
  1915. }
  1916. // Extract/Decode NEON N/Vn:
  1917. //
  1918. // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
  1919. // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
  1920. // handling it in the getRegisterEnum() utility function.
  1921. // N = Inst{7}, Vn = Inst{19-16}
  1922. static unsigned decodeNEONRn(uint32_t insn) {
  1923. return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
  1924. | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
  1925. }
  1926. // Extract/Decode NEON M/Vm:
  1927. //
  1928. // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
  1929. // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
  1930. // handling it in the getRegisterEnum() utility function.
  1931. // M = Inst{5}, Vm = Inst{3-0}
  1932. static unsigned decodeNEONRm(uint32_t insn) {
  1933. return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
  1934. | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
  1935. }
namespace {
// Element size, in bits, of a NEON vector lane.  The enumerator values are
// the sizes themselves so they can be used directly in shift-amount
// arithmetic (see decodeNVSAmt()).  ESizeNA marks "not applicable / unknown".
enum ElemSize {
  ESizeNA = 0,
  ESize8 = 8,
  ESize16 = 16,
  ESize32 = 32,
  ESize64 = 64
};
} // End of unnamed namespace
  1945. // size field -> Inst{11-10}
  1946. // index_align field -> Inst{7-4}
  1947. //
  1948. // The Lane Index interpretation depends on the Data Size:
  1949. // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
  1950. // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
  1951. // 32 (encoded as size = 0b10) -> Index = index_align[3]
  1952. //
  1953. // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
  1954. static unsigned decodeLaneIndex(uint32_t insn) {
  1955. unsigned size = insn >> 10 & 3;
  1956. assert((size == 0 || size == 1 || size == 2) &&
  1957. "Encoding error: size should be either 0, 1, or 2");
  1958. unsigned index_align = insn >> 4 & 0xF;
  1959. return (index_align >> 1) >> size;
  1960. }
  1961. // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
  1962. // op = Inst{5}, cmode = Inst{11-8}
  1963. // i = Inst{24} (ARM architecture)
  1964. // imm3 = Inst{18-16}, imm4 = Inst{3-0}
  1965. // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
  1966. static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
  1967. unsigned char op = (insn >> 5) & 1;
  1968. unsigned char cmode = (insn >> 8) & 0xF;
  1969. unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
  1970. ((insn >> 16) & 7) << 4 |
  1971. (insn & 0xF);
  1972. return (op << 12) | (cmode << 8) | Imm8;
  1973. }
  1974. // A8.6.339 VMUL, VMULL (by scalar)
  1975. // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
  1976. // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
  1977. static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
  1978. switch (esize) {
  1979. case ESize16:
  1980. return insn & 7;
  1981. case ESize32:
  1982. return insn & 0xF;
  1983. default:
  1984. assert(0 && "Unreachable code!");
  1985. return 0;
  1986. }
  1987. }
  1988. // A8.6.339 VMUL, VMULL (by scalar)
  1989. // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
  1990. // ESize32 => index = Inst{5} (M) D0-D15
  1991. static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
  1992. switch (esize) {
  1993. case ESize16:
  1994. return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
  1995. case ESize32:
  1996. return (insn >> 5) & 1;
  1997. default:
  1998. assert(0 && "Unreachable code!");
  1999. return 0;
  2000. }
  2001. }
  2002. // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
  2003. // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
  2004. static unsigned decodeVCVTFractionBits(uint32_t insn) {
  2005. return 64 - ((insn >> 16) & 0x3F);
  2006. }
  2007. // A8.6.302 VDUP (scalar)
  2008. // ESize8 => index = Inst{19-17}
  2009. // ESize16 => index = Inst{19-18}
  2010. // ESize32 => index = Inst{19}
  2011. static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
  2012. switch (esize) {
  2013. case ESize8:
  2014. return (insn >> 17) & 7;
  2015. case ESize16:
  2016. return (insn >> 18) & 3;
  2017. case ESize32:
  2018. return (insn >> 19) & 1;
  2019. default:
  2020. assert(0 && "Unspecified element size!");
  2021. return 0;
  2022. }
  2023. }
  2024. // A8.6.328 VMOV (ARM core register to scalar)
  2025. // A8.6.329 VMOV (scalar to ARM core register)
  2026. // ESize8 => index = Inst{21:6-5}
  2027. // ESize16 => index = Inst{21:6}
  2028. // ESize32 => index = Inst{21}
  2029. static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
  2030. switch (esize) {
  2031. case ESize8:
  2032. return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
  2033. case ESize16:
  2034. return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
  2035. case ESize32:
  2036. return ((insn >> 21) & 1);
  2037. default:
  2038. assert(0 && "Unspecified element size!");
  2039. return 0;
  2040. }
  2041. }
  2042. // Imm6 = Inst{21-16}, L = Inst{7}
  2043. //
  2044. // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
  2045. // case L:imm6 of
  2046. // '0001xxx' => esize = 8; shift_amount = imm6 - 8
  2047. // '001xxxx' => esize = 16; shift_amount = imm6 - 16
  2048. // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
  2049. // '1xxxxxx' => esize = 64; shift_amount = imm6
  2050. //
  2051. // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
  2052. // case L:imm6 of
  2053. // '0001xxx' => esize = 8; shift_amount = 16 - imm6
  2054. // '001xxxx' => esize = 16; shift_amount = 32 - imm6
  2055. // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
  2056. // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
  2057. //
  2058. static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
  2059. ElemSize esize = ESizeNA;
  2060. unsigned L = (insn >> 7) & 1;
  2061. unsigned imm6 = (insn >> 16) & 0x3F;
  2062. if (L == 0) {
  2063. if (imm6 >> 3 == 1)
  2064. esize = ESize8;
  2065. else if (imm6 >> 4 == 1)
  2066. esize = ESize16;
  2067. else if (imm6 >> 5 == 1)
  2068. esize = ESize32;
  2069. else
  2070. assert(0 && "Wrong encoding of Inst{7:21-16}!");
  2071. } else
  2072. esize = ESize64;
  2073. if (LeftShift)
  2074. return esize == ESize64 ? imm6 : (imm6 - esize);
  2075. else
  2076. return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
  2077. }
  2078. // A8.6.305 VEXT
  2079. // Imm4 = Inst{11-8}
  2080. static unsigned decodeN3VImm(uint32_t insn) {
  2081. return (insn >> 8) & 0xF;
  2082. }
// VLD*
//   D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
// VLD*LN*
//   D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
// VST*
//   Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
// VST*LN*
//   Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
//
// Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
//
// Builds the MCInst operand list for a NEON structure load/store.
// 'Store' selects the VST* operand order, 'DblSpaced' makes the vector
// list step by two D registers, and 'alignment' is the required address
// alignment in bytes (0 = standard alignment).  Returns false if the
// decoded register list would run past the end of the NEON register file.
static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
    unsigned alignment, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  // At least one DPR register plus addressing mode #6.
  assert(NumOps >= 3 && "Expect >= 3 operands");

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // We have homogeneous NEON registers for Load/Store.
  unsigned RegClass = 0;

  // Double-spaced registers have increments of 2.
  unsigned Inc = DblSpaced ? 2 : 1;

  unsigned Rn = decodeRn(insn);
  unsigned Rm = decodeRm(insn);
  unsigned Rd = decodeNEONRd(insn);

  // A7.7.1 Advanced SIMD addressing mode.
  // Rm == 15 encodes "no writeback"; anything else updates the base.
  bool WB = Rm != 15;

  // LLVM Addressing Mode #6.
  // Per A7.7.1, Rm == 13 means writeback by the transfer size, so no
  // explicit increment register operand is added in that case.
  unsigned RmEnum = 0;
  if (WB && Rm != 13)
    RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);

  if (Store) {
    // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
    // then possible lane index.
    assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");

    // The writeback def of the base register comes first.
    if (WB) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         Rn)));
      ++OpIdx;
    }

    assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
    // addrmode6 := (ops GPR:$addr, i32imm)
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       Rn)));
    MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
    OpIdx += 2;

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(RmEnum));
      ++OpIdx;
    }

    assert(OpIdx < NumOps &&
           (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
            OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
           "Reg operand expected");

    // The source vector list: consecutive (or double-spaced) NEON registers.
    RegClass = OpInfo[OpIdx].RegClass;
    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, RegClass, Rd)));
      Rd += Inc;
      ++OpIdx;
    }

    // Handle possible lane index.
    if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
      MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
      ++OpIdx;
    }

  } else {
    // Consume the DPR/QPR's, possible WB, AddrMode6, possible incrment reg,
    // possible TIED_TO DPR/QPR's (ignored), then possible lane index.

    // The destination vector list comes first for loads.
    RegClass = OpInfo[0].RegClass;
    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, RegClass, Rd)));
      Rd += Inc;
      ++OpIdx;
    }

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                         Rn)));
      ++OpIdx;
    }

    assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
    // addrmode6 := (ops GPR:$addr, i32imm)
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                       Rn)));
    MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
    OpIdx += 2;

    if (WB) {
      MI.addOperand(MCOperand::CreateReg(RmEnum));
      ++OpIdx;
    }

    // TIED_TO source copies of the destination vectors: placeholder reg 0
    // is added for each, since they carry no extra information.
    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      assert(MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1 &&
             "Tied to operand expected");
      MI.addOperand(MCOperand::CreateReg(0));
      ++OpIdx;
    }

    // Handle possible lane index.
    if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
      MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
      ++OpIdx;
    }
  }

  // Accessing registers past the end of the NEON register file is not
  // defined.
  if (Rd > 32)
    return false;

  return true;
}
  2198. // A8.6.308, A8.6.311, A8.6.314, A8.6.317.
  2199. static bool Align4OneLaneInst(unsigned elem, unsigned size,
  2200. unsigned index_align, unsigned & alignment) {
  2201. unsigned bits = 0;
  2202. switch (elem) {
  2203. default:
  2204. return false;
  2205. case 1:
  2206. // A8.6.308
  2207. if (size == 0)
  2208. return slice(index_align, 0, 0) == 0;
  2209. else if (size == 1) {
  2210. bits = slice(index_align, 1, 0);
  2211. if (bits != 0 && bits != 1)
  2212. return false;
  2213. if (bits == 1)
  2214. alignment = 16;
  2215. return true;
  2216. } else if (size == 2) {
  2217. bits = slice(index_align, 2, 0);
  2218. if (bits != 0 && bits != 3)
  2219. return false;
  2220. if (bits == 3)
  2221. alignment = 32;
  2222. return true;;
  2223. }
  2224. return true;
  2225. case 2:
  2226. // A8.6.311
  2227. if (size == 0) {
  2228. if (slice(index_align, 0, 0) == 1)
  2229. alignment = 16;
  2230. return true;
  2231. } if (size == 1) {
  2232. if (slice(index_align, 0, 0) == 1)
  2233. alignment = 32;
  2234. return true;
  2235. } else if (size == 2) {
  2236. if (slice(index_align, 1, 1) != 0)
  2237. return false;
  2238. if (slice(index_align, 0, 0) == 1)
  2239. alignment = 64;
  2240. return true;;
  2241. }
  2242. return true;
  2243. case 3:
  2244. // A8.6.314
  2245. if (size == 0) {
  2246. if (slice(index_align, 0, 0) != 0)
  2247. return false;
  2248. return true;
  2249. } if (size == 1) {
  2250. if (slice(index_align, 0, 0) != 0)
  2251. return false;
  2252. return true;
  2253. return true;
  2254. } else if (size == 2) {
  2255. if (slice(index_align, 1, 0) != 0)
  2256. return false;
  2257. return true;;
  2258. }
  2259. return true;
  2260. case 4:
  2261. // A8.6.317
  2262. if (size == 0) {
  2263. if (slice(index_align, 0, 0) == 1)
  2264. alignment = 32;
  2265. return true;
  2266. } if (size == 1) {
  2267. if (slice(index_align, 0, 0) == 1)
  2268. alignment = 64;
  2269. return true;
  2270. } else if (size == 2) {
  2271. bits = slice(index_align, 1, 0);
  2272. if (bits == 3)
  2273. return false;
  2274. if (bits == 1)
  2275. alignment = 64;
  2276. else if (bits == 2)
  2277. alignment = 128;
  2278. return true;;
  2279. }
  2280. return true;
  2281. }
  2282. }
  2283. // A7.7
  2284. // If L (Inst{21}) == 0, store instructions.
  2285. // Find out about double-spaced-ness of the Opcode and pass it on to
  2286. // DisassembleNLdSt0().
  2287. static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
  2288. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2289. const StringRef Name = ARMInsts[Opcode].Name;
  2290. bool DblSpaced = false;
  2291. // 0 represents standard alignment, i.e., unaligned data access.
  2292. unsigned alignment = 0;
  2293. unsigned elem = 0; // legal values: {1, 2, 3, 4}
  2294. if (Name.startswith("VST1") || Name.startswith("VLD1"))
  2295. elem = 1;
  2296. if (Name.startswith("VST2") || Name.startswith("VLD2"))
  2297. elem = 2;
  2298. if (Name.startswith("VST3") || Name.startswith("VLD3"))
  2299. elem = 3;
  2300. if (Name.startswith("VST4") || Name.startswith("VLD4"))
  2301. elem = 4;
  2302. if (Name.find("LN") != std::string::npos) {
  2303. // To one lane instructions.
  2304. // See, for example, 8.6.317 VLD4 (single 4-element structure to one lane).
  2305. // Utility function takes number of elements, size, and index_align.
  2306. if (!Align4OneLaneInst(elem,
  2307. slice(insn, 11, 10),
  2308. slice(insn, 7, 4),
  2309. alignment))
  2310. return false;
  2311. // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
  2312. if (Name.endswith("16") || Name.endswith("16_UPD"))
  2313. DblSpaced = slice(insn, 5, 5) == 1;
  2314. // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
  2315. if (Name.endswith("32") || Name.endswith("32_UPD"))
  2316. DblSpaced = slice(insn, 6, 6) == 1;
  2317. } else if (Name.find("DUP") != std::string::npos) {
  2318. // Single element (or structure) to all lanes.
  2319. // Inst{9-8} encodes the number of element(s) in the structure, with:
  2320. // 0b00 (VLD1DUP) (for this, a bit makes sense only for data size 16 and 32.
  2321. // 0b01 (VLD2DUP)
  2322. // 0b10 (VLD3DUP) (for this, a bit must be encoded as 0)
  2323. // 0b11 (VLD4DUP)
  2324. //
  2325. // Inst{7-6} encodes the data size, with:
  2326. // 0b00 => 8, 0b01 => 16, 0b10 => 32
  2327. //
  2328. // Inst{4} (the a bit) encodes the align action (0: standard alignment)
  2329. unsigned elem = slice(insn, 9, 8) + 1;
  2330. unsigned a = slice(insn, 4, 4);
  2331. if (elem != 3) {
  2332. // 0b11 is not a valid encoding for Inst{7-6}.
  2333. if (slice(insn, 7, 6) == 3)
  2334. return false;
  2335. unsigned data_size = 8 << slice(insn, 7, 6);
  2336. // For VLD1DUP, a bit makes sense only for data size of 16 and 32.
  2337. if (a && data_size == 8)
  2338. return false;
  2339. // Now we can calculate the alignment!
  2340. if (a)
  2341. alignment = elem * data_size;
  2342. } else {
  2343. if (a) {
  2344. // A8.6.315 VLD3 (single 3-element structure to all lanes)
  2345. // The a bit must be encoded as 0.
  2346. return false;
  2347. }
  2348. }
  2349. } else {
  2350. // Multiple n-element structures with type encoded as Inst{11-8}.
  2351. // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
  2352. // Inst{5-4} encodes alignment.
  2353. unsigned align = slice(insn, 5, 4);
  2354. switch (align) {
  2355. default:
  2356. break;
  2357. case 1:
  2358. alignment = 64; break;
  2359. case 2:
  2360. alignment = 128; break;
  2361. case 3:
  2362. alignment = 256; break;
  2363. }
  2364. unsigned type = slice(insn, 11, 8);
  2365. // Reject UNDEFINED instructions based on type and align.
  2366. // Plus set DblSpaced flag where appropriate.
  2367. switch (elem) {
  2368. default:
  2369. break;
  2370. case 1:
  2371. // n == 1
  2372. // A8.6.307 & A8.6.391
  2373. if ((type == 7 && slice(align, 1, 1) == 1) ||
  2374. (type == 10 && align == 3) ||
  2375. (type == 6 && slice(align, 1, 1) == 1))
  2376. return false;
  2377. break;
  2378. case 2:
  2379. // n == 2 && type == 0b1001 -> DblSpaced = true
  2380. // A8.6.310 & A8.6.393
  2381. if ((type == 8 || type == 9) && align == 3)
  2382. return false;
  2383. DblSpaced = (type == 9);
  2384. break;
  2385. case 3:
  2386. // n == 3 && type == 0b0101 -> DblSpaced = true
  2387. // A8.6.313 & A8.6.395
  2388. if (slice(insn, 7, 6) == 3 || slice(align, 1, 1) == 1)
  2389. return false;
  2390. DblSpaced = (type == 5);
  2391. break;
  2392. case 4:
  2393. // n == 4 && type == 0b0001 -> DblSpaced = true
  2394. // A8.6.316 & A8.6.397
  2395. if (slice(insn, 7, 6) == 3)
  2396. return false;
  2397. DblSpaced = (type == 1);
  2398. break;
  2399. }
  2400. }
  2401. return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
  2402. slice(insn, 21, 21) == 0, DblSpaced, alignment/8, B);
  2403. }
// VMOV (immediate)
//   Qd/Dd imm
// VBIC (immediate)
// VORR (immediate)
//   Qd/Dd imm src(=Qd/Dd)
//
// Disassembles the NEON one-register-plus-modified-immediate format.
// The element size used when packing the immediate is derived from the
// opcode; returns false for opcodes this format does not cover.
static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                     decodeNEONRd(insn))));

  // Map the opcode to the element size of the modified immediate.
  ElemSize esize = ESizeNA;
  switch (Opcode) {
  case ARM::VMOVv8i8:
  case ARM::VMOVv16i8:
    esize = ESize8;
    break;
  case ARM::VMOVv4i16:
  case ARM::VMOVv8i16:
  case ARM::VMVNv4i16:
  case ARM::VMVNv8i16:
  case ARM::VBICiv4i16:
  case ARM::VBICiv8i16:
  case ARM::VORRiv4i16:
  case ARM::VORRiv8i16:
    esize = ESize16;
    break;
  case ARM::VMOVv2i32:
  case ARM::VMOVv4i32:
  case ARM::VMVNv2i32:
  case ARM::VMVNv4i32:
  case ARM::VBICiv2i32:
  case ARM::VBICiv4i32:
  case ARM::VORRiv2i32:
  case ARM::VORRiv4i32:
    esize = ESize32;
    break;
  case ARM::VMOVv1i64:
  case ARM::VMOVv2i64:
    esize = ESize64;
    break;
  default:
    assert(0 && "Unexpected opcode!");
    return false;
  }

  // One register and a modified immediate value.
  // Add the imm operand.
  MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));

  NumOpsAdded = 2;

  // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
  if (NumOps >= 3 &&
      (OpInfo[2].RegClass == ARM::DPRRegClassID ||
       OpInfo[2].RegClass == ARM::QPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                       decodeNEONRd(insn))));
    NumOpsAdded += 1;
  }
  return true;
}
namespace {
// Sub-flavors of the NEON two-register (N2V) format; selects how the
// optional trailing immediate is decoded in DisassembleNVdVmOptImm().
enum N2VFlag {
  N2V_None,
  N2V_VectorDupLane,
  N2V_VectorConvert_Between_Float_Fixed
};
} // End of unnamed namespace
// Vector Convert [between floating-point and fixed-point]
//   Qd/Dd Qm/Dm [fbits]
//
// Vector Duplicate Lane (from scalar to all elements) Instructions.
// VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
//   Qd/Dd Dm index
//
// Vector Move Long:
//   Qd Dm
//
// Vector Move Narrow:
//   Dd Qm
//
// Others
//
// Shared worker for the two-register NEON formats; 'Flag' selects how the
// optional trailing immediate operand (lane index or fraction bits) is
// decoded.
static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opc];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 2 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  ElemSize esize = ESizeNA;
  if (Flag == N2V_VectorDupLane) {
    // VDUPLN has its index embedded.  Its size can be inferred from the Opcode.
    assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
           "Unexpected Opcode");
    esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
       : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
                                                           : ESize32);
  }

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  // VPADAL...
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    // TIED_TO operand.
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));
  ++OpIdx;

  // VZIP and others have two TIED_TO reg operands.
  int Idx;
  while (OpIdx < NumOps &&
         (Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
    // Add TIED_TO operand.
    MI.addOperand(MI.getOperand(Idx));
    ++OpIdx;
  }

  // Add the imm operand, if required.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {

    // 0xFFFFFFFF is a sentinel; one of the two decoders below must fire.
    unsigned imm = 0xFFFFFFFF;

    if (Flag == N2V_VectorDupLane)
      imm = decodeNVLaneDupIndex(insn, esize);
    if (Flag == N2V_VectorConvert_Between_Float_Fixed)
      imm = decodeVCVTFractionBits(insn);

    assert(imm != 0xFFFFFFFF && "Internal error");
    MI.addOperand(MCOperand::CreateImm(imm));
    ++OpIdx;
  }

  return true;
}
  2547. static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
  2548. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2549. return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
  2550. N2V_None, B);
  2551. }
  2552. static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
  2553. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2554. return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
  2555. N2V_VectorConvert_Between_Float_Fixed, B);
  2556. }
  2557. static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
  2558. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2559. return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
  2560. N2V_VectorDupLane, B);
  2561. }
// Vector Shift [Accumulate] Instructions.
//   Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
//
// Vector Shift Left Long (with maximum shift count) Instructions.
// VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
//
// Shared worker for left/right vector shift formats; 'LeftShift' selects
// how the shift amount encoded in Inst{7:21-16} is interpreted (see
// decodeNVSAmt()).
static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  // Shift-accumulate forms tie an extra source operand to Qd/Dd.
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    // TIED_TO operand.
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");

  // Qm/Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));
  ++OpIdx;

  assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");

  // Add the imm operand.

  // VSHLL has maximum shift count as the imm, inferred from its size.
  unsigned Imm;
  switch (Opcode) {
  default:
    Imm = decodeNVSAmt(insn, LeftShift);
    break;
  case ARM::VSHLLi8:
    Imm = 8;
    break;
  case ARM::VSHLLi16:
    Imm = 16;
    break;
  case ARM::VSHLLi32:
    Imm = 32;
    break;
  }
  MI.addOperand(MCOperand::CreateImm(Imm));
  ++OpIdx;

  return true;
}
  2618. // Left shift instructions.
  2619. static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
  2620. uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2621. return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
  2622. B);
  2623. }
  2624. // Right shift instructions have different shift amount interpretation.
  2625. static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
  2626. uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2627. return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
  2628. B);
  2629. }
namespace {
// Sub-flavors of the NEON three-register (N3V) format; selects the operand
// order and how the optional trailing immediate is decoded in
// DisassembleNVdVnVmOptImm().
enum N3VFlag {
  N3V_None,
  N3V_VectorExtract,
  N3V_VectorShift,
  N3V_Multiply_By_Scalar
};
} // End of unnamed namespace
// NEON Three Register Instructions with Optional Immediate Operand
//
// Vector Extract Instructions.
//   Qd/Dd Qn/Dn Qm/Dm imm4
//
// Vector Shift (Register) Instructions.
//   Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
//
// Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
//   Qd/Dd Qn/Dn RestrictedDm index
//
// Others
//
// Shared worker for the three-register NEON formats.  'Flag' controls the
// n/m operand order (swapped for register shifts), whether Dm is the
// restricted scalar form, and how the optional immediate is decoded.
static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  // Flag-derived behavior switches for the code below.
  bool VdVnVm = Flag == N3V_VectorShift ? false : true;
  bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
  bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
  ElemSize esize = ESizeNA;
  if (Flag == N3V_Multiply_By_Scalar) {
    // For by-scalar multiplies, Inst{21-20} selects the element size.
    unsigned size = (insn >> 20) & 3;
    if (size == 1) esize = ESize16;
    if (size == 2) esize = ESize32;
    assert (esize == ESize16 || esize == ESize32);
  }

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  // VABA, VABAL, VBSLd, VBSLq, ...
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    // TIED_TO operand.
    MI.addOperand(MCOperand::CreateReg(0));
    ++OpIdx;
  }

  // Dn = Inst{7:19-16} => NEON Rn
  // or
  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                  VdVnVm ? decodeNEONRn(insn)
                                         : decodeNEONRm(insn))));
  ++OpIdx;

  // Special case handling for VMOVDneon and VMOVQ because they are marked as
  // N3RegFrm.
  if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
    return true;

  // Dm = Inst{5:3-0} => NEON Rm
  // or
  // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
  // or
  // Dn = Inst{7:19-16} => NEON Rn
  unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
                                        : decodeNEONRm(insn))
                      : decodeNEONRn(insn);
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
  ++OpIdx;

  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Add the imm operand.
    unsigned Imm = 0;
    if (IsImm4)
      Imm = decodeN3VImm(insn);
    else if (IsDmRestricted)
      Imm = decodeRestrictedDmIndex(insn, esize);
    else {
      assert(0 && "Internal error: unreachable code!");
      return false;
    }
    MI.addOperand(MCOperand::CreateImm(Imm));
    ++OpIdx;
  }

  return true;
}
  2723. static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  2724. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2725. return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
  2726. N3V_None, B);
  2727. }
  2728. static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
  2729. uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2730. return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
  2731. N3V_VectorShift, B);
  2732. }
  2733. static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
  2734. uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2735. return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
  2736. N3V_VectorExtract, B);
  2737. }
  2738. static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
  2739. uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2740. return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
  2741. N3V_Multiply_By_Scalar, B);
  2742. }
// Vector Table Lookup
//
// VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
// VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
// VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
// VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
//
// Builds the operand list: destination, optional tied source (VTBX), the
// table register list Dn..Dn+len-1, then the index vector Dm.
static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  assert(NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass == ARM::DPRRegClassID &&
         "Expect >= 3 operands and first 3 as reg operands");

  unsigned &OpIdx = NumOpsAdded;
  OpIdx = 0;

  unsigned Rn = decodeNEONRn(insn);

  // {Dn} encoded as len = 0b00
  // {Dn Dn+1} encoded as len = 0b01
  // {Dn Dn+1 Dn+2 } encoded as len = 0b10
  // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
  unsigned Len = slice(insn, 9, 8) + 1;

  // Dd (the destination vector)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRd(insn))));
  ++OpIdx;

  // Process tied_to operand constraint.
  int Idx;
  if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
    MI.addOperand(MI.getOperand(Idx));
    ++OpIdx;
  }

  // Do the <list> now: Len consecutive D registers starting at Dn.
  for (unsigned i = 0; i < Len; ++i) {
    assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                       Rn + i)));
    ++OpIdx;
  }

  // Dm (the index vector)
  assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
         "Reg operand (index vector) expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRm(insn))));
  ++OpIdx;

  return true;
}
  2793. // Vector Get Lane (move scalar to ARM core register) Instructions.
  2794. // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
  2795. static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  2796. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2797. const MCInstrDesc &MCID = ARMInsts[Opcode];
  2798. const MCOperandInfo *OpInfo = MCID.OpInfo;
  2799. if (!OpInfo) return false;
  2800. assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
  2801. OpInfo[0].RegClass == ARM::GPRRegClassID &&
  2802. OpInfo[1].RegClass == ARM::DPRRegClassID &&
  2803. OpInfo[2].RegClass < 0 &&
  2804. "Expect >= 3 operands with one dst operand");
  2805. ElemSize esize =
  2806. Opcode == ARM::VGETLNi32 ? ESize32
  2807. : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
  2808. : ESize8);
  2809. // Rt = Inst{15-12} => ARM Rd
  2810. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  2811. decodeRd(insn))));
  2812. // Dn = Inst{7:19-16} => NEON Rn
  2813. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
  2814. decodeNEONRn(insn))));
  2815. MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
  2816. NumOpsAdded = 3;
  2817. return true;
  2818. }
  2819. // Vector Set Lane (move ARM core register to scalar) Instructions.
  2820. // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
  2821. static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  2822. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2823. const MCInstrDesc &MCID = ARMInsts[Opcode];
  2824. const MCOperandInfo *OpInfo = MCID.OpInfo;
  2825. if (!OpInfo) return false;
  2826. assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
  2827. OpInfo[0].RegClass == ARM::DPRRegClassID &&
  2828. OpInfo[1].RegClass == ARM::DPRRegClassID &&
  2829. MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
  2830. OpInfo[2].RegClass == ARM::GPRRegClassID &&
  2831. OpInfo[3].RegClass < 0 &&
  2832. "Expect >= 3 operands with one dst operand");
  2833. ElemSize esize =
  2834. Opcode == ARM::VSETLNi8 ? ESize8
  2835. : (Opcode == ARM::VSETLNi16 ? ESize16
  2836. : ESize32);
  2837. // Dd = Inst{7:19-16} => NEON Rn
  2838. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
  2839. decodeNEONRn(insn))));
  2840. // TIED_TO operand.
  2841. MI.addOperand(MCOperand::CreateReg(0));
  2842. // Rt = Inst{15-12} => ARM Rd
  2843. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  2844. decodeRd(insn))));
  2845. MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
  2846. NumOpsAdded = 4;
  2847. return true;
  2848. }
  2849. // Vector Duplicate Instructions (from ARM core register to all elements).
  2850. // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
  2851. static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  2852. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2853. const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  2854. assert(NumOps >= 2 &&
  2855. (OpInfo[0].RegClass == ARM::DPRRegClassID ||
  2856. OpInfo[0].RegClass == ARM::QPRRegClassID) &&
  2857. OpInfo[1].RegClass == ARM::GPRRegClassID &&
  2858. "Expect >= 2 operands and first 2 as reg operand");
  2859. unsigned RegClass = OpInfo[0].RegClass;
  2860. // Qd/Dd = Inst{7:19-16} => NEON Rn
  2861. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
  2862. decodeNEONRn(insn))));
  2863. // Rt = Inst{15-12} => ARM Rd
  2864. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  2865. decodeRd(insn))));
  2866. NumOpsAdded = 2;
  2867. return true;
  2868. }
  2869. static inline bool PreLoadOpcode(unsigned Opcode) {
  2870. switch(Opcode) {
  2871. case ARM::PLDi12: case ARM::PLDrs:
  2872. case ARM::PLDWi12: case ARM::PLDWrs:
  2873. case ARM::PLIi12: case ARM::PLIrs:
  2874. return true;
  2875. default:
  2876. return false;
  2877. }
  2878. }
  2879. static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
  2880. unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
  2881. // Preload Data/Instruction requires either 2 or 3 operands.
  2882. // PLDi12, PLDWi12, PLIi12: addrmode_imm12
  2883. // PLDrs, PLDWrs, PLIrs: ldst_so_reg
  2884. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  2885. decodeRn(insn))));
  2886. if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
  2887. || Opcode == ARM::PLIi12) {
  2888. unsigned Imm12 = slice(insn, 11, 0);
  2889. bool Negative = getUBit(insn) == 0;
  2890. // A8.6.118 PLD (literal) PLDWi12 with Rn=PC is transformed to PLDi12.
  2891. if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
  2892. DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
  2893. MI.setOpcode(ARM::PLDi12);
  2894. }
  2895. // -0 is represented specially. All other values are as normal.
  2896. int Offset = Negative ? -1 * Imm12 : Imm12;
  2897. if (Imm12 == 0 && Negative)
  2898. Offset = INT32_MIN;
  2899. MI.addOperand(MCOperand::CreateImm(Offset));
  2900. NumOpsAdded = 2;
  2901. } else {
  2902. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  2903. decodeRm(insn))));
  2904. ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  2905. // Inst{6-5} encodes the shift opcode.
  2906. ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
  2907. // Inst{11-7} encodes the imm5 shift amount.
  2908. unsigned ShImm = slice(insn, 11, 7);
  2909. // A8.4.1. Possible rrx or shift amount of 32...
  2910. getImmShiftSE(ShOp, ShImm);
  2911. MI.addOperand(MCOperand::CreateImm(
  2912. ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
  2913. NumOpsAdded = 3;
  2914. }
  2915. return true;
  2916. }
// Disassembles the miscellaneous-format instructions: memory barriers
// (DMB/DSB), hints (NOP/WFE/...), SWP/SWPB, SETEND, the CPS variants, DBG,
// BKPT, and the preload family (delegated to DisassemblePreLoadFrm).
static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  if (Opcode == ARM::DMB || Opcode == ARM::DSB) {
    // Inst{3-0} encodes the memory barrier option for the variants.
    unsigned opt = slice(insn, 3, 0);
    switch (opt) {
    // Only the architecturally defined barrier options are accepted; any
    // other encoding rejects the instruction.
    case ARM_MB::SY: case ARM_MB::ST:
    case ARM_MB::ISH: case ARM_MB::ISHST:
    case ARM_MB::NSH: case ARM_MB::NSHST:
    case ARM_MB::OSH: case ARM_MB::OSHST:
      MI.addOperand(MCOperand::CreateImm(opt));
      NumOpsAdded = 1;
      return true;
    default:
      return false;
    }
  }

  switch (Opcode) {
  // Operand-less instructions: nothing to add.
  case ARM::CLREX:
  case ARM::NOP:
  case ARM::TRAP:
  case ARM::YIELD:
  case ARM::WFE:
  case ARM::WFI:
  case ARM::SEV:
    return true;
  case ARM::SWP:
  case ARM::SWPB:
    // SWP, SWPB: Rd Rm Rn
    // Delegate to DisassembleLdStExFrm()....
    return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
  default:
    break;
  }

  // SETEND: the endianness bit is Inst{9}.
  if (Opcode == ARM::SETEND) {
    NumOpsAdded = 1;
    MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
    return true;
  }

  // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
  // opcodes which match the same real instruction. This is needed since there's
  // no current handling of optional arguments. Fix here when a better handling
  // of optional arguments is implemented.
  if (Opcode == ARM::CPS3p) {   // M = 1
    // Let's reject these impossible imod values by returning false:
    // 1. (imod=0b01)
    //
    // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
    // invalid combination, so we just check for imod=0b00 here.
    if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
      return false;
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
    MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6)));   // iflags
    MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));   // mode
    NumOpsAdded = 3;
    return true;
  }

  if (Opcode == ARM::CPS2p) {   // mode = 0, M = 0
    // Let's reject these impossible imod values by returning false:
    // 1. (imod=0b00,M=0)
    // 2. (imod=0b01)
    if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
      return false;
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
    MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6)));   // iflags
    NumOpsAdded = 2;
    return true;
  }

  if (Opcode == ARM::CPS1p) {   // imod = 0, iflags = 0, M = 1
    MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));   // mode
    NumOpsAdded = 1;
    return true;
  }

  // DBG has its option specified in Inst{3-0}.
  if (Opcode == ARM::DBG) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
    NumOpsAdded = 1;
    return true;
  }

  // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
  if (Opcode == ARM::BKPT) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
                                       slice(insn, 3, 0)));
    NumOpsAdded = 1;
    return true;
  }

  if (PreLoadOpcode(Opcode))
    return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);

  assert(0 && "Unexpected misc instruction!");
  return false;
}
/// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
/// We divide the disassembly task into different categories, with each one
/// corresponding to a specific instruction encoding format. There could be
/// exceptions when handling a specific format, and that is why the Opcode is
/// also present in the function prototype.
///
/// NOTE: the entry order is the index used by ARMBasicMCBuilder (the format
/// enum value is used directly as an array index), and the trailing NULL is
/// a sentinel excluded from the valid-index range via array_lengthof()-1.
static const DisassembleFP FuncPtrs[] = {
  &DisassemblePseudo,
  &DisassembleMulFrm,
  &DisassembleBrFrm,
  &DisassembleBrMiscFrm,
  &DisassembleDPFrm,
  &DisassembleDPSoRegFrm,
  &DisassembleLdFrm,
  &DisassembleStFrm,
  &DisassembleLdMiscFrm,
  &DisassembleStMiscFrm,
  &DisassembleLdStMulFrm,
  &DisassembleLdStExFrm,
  &DisassembleArithMiscFrm,
  &DisassembleSatFrm,
  &DisassembleExtFrm,
  &DisassembleVFPUnaryFrm,
  &DisassembleVFPBinaryFrm,
  &DisassembleVFPConv1Frm,
  &DisassembleVFPConv2Frm,
  &DisassembleVFPConv3Frm,
  &DisassembleVFPConv4Frm,
  &DisassembleVFPConv5Frm,
  &DisassembleVFPLdStFrm,
  &DisassembleVFPLdStMulFrm,
  &DisassembleVFPMiscFrm,
  &DisassembleThumbFrm,
  &DisassembleMiscFrm,
  &DisassembleNGetLnFrm,
  &DisassembleNSetLnFrm,
  &DisassembleNDupFrm,

  // VLD and VST (including one lane) Instructions.
  &DisassembleNLdSt,

  // A7.4.6 One register and a modified immediate value
  // 1-Register Instructions with imm.
  // LLVM only defines VMOVv instructions.
  &DisassembleN1RegModImmFrm,

  // 2-Register Instructions with no imm.
  &DisassembleN2RegFrm,

  // 2-Register Instructions with imm (vector convert float/fixed point).
  &DisassembleNVCVTFrm,

  // 2-Register Instructions with imm (vector dup lane).
  &DisassembleNVecDupLnFrm,

  // Vector Shift Left Instructions.
  &DisassembleN2RegVecShLFrm,

  // Vector Shift Right Instructions, which has different interpretation of the
  // shift amount from the imm6 field.
  &DisassembleN2RegVecShRFrm,

  // 3-Register Data-Processing Instructions.
  &DisassembleN3RegFrm,

  // Vector Shift (Register) Instructions.
  // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
  &DisassembleN3RegVecShFrm,

  // Vector Extract Instructions.
  &DisassembleNVecExtractFrm,

  // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
  // By Scalar Instructions.
  &DisassembleNVecMulScalarFrm,

  // Vector Table Lookup uses byte indexes in a control vector to look up byte
  // values in a table and generate a new vector.
  &DisassembleNVTBLFrm,

  NULL
};
/// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
/// The general idea is to set the Opcode for the MCInst, followed by adding
/// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
/// to the Format-specific disassemble function for disassembly, followed by
/// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
/// which follow the Dst/Src Operands.
bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
  // Stage 1 sets the Opcode.
  MI.setOpcode(Opcode);
  // If the number of operands is zero, we're done!
  if (NumOps == 0)
    return true;

  // Stage 2 calls the format-specific disassemble function to build the
  // operand list. Disasm was looked up from FuncPtrs at construction time.
  if (Disasm == NULL)
    return false;
  unsigned NumOpsAdded = 0;
  bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);

  // A failure either reported by the handler itself or recorded in Err
  // aborts the build.
  if (!OK || this->Err != 0) return false;
  if (NumOpsAdded >= NumOps)
    return true;

  // Stage 3 deals with operands unaccounted for after stage 2 is finished.
  // FIXME: Should this be done selectively?
  return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
}
  3101. // A8.3 Conditional execution
  3102. // A8.3.1 Pseudocode details of conditional execution
  3103. // Condition bits '111x' indicate the instruction is always executed.
  3104. static uint32_t CondCode(uint32_t CondField) {
  3105. if (CondField == 0xF)
  3106. return ARMCC::AL;
  3107. return CondField;
  3108. }
/// DoPredicateOperands - DoPredicateOperands process the predicate operands
/// of some Thumb instructions which come before the reglist operands. It
/// returns true if the two predicate operands have been processed.
bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
    uint32_t /* insn */, unsigned short NumOpsRemaining) {

  assert(NumOpsRemaining > 0 && "Invalid argument");

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  // Next operand slot to examine is right after whatever has been added so
  // far.
  unsigned Idx = MI.getNumOperands();

  // First, we check whether this instr specifies the PredicateOperand through
  // a pair of MCOperandInfos with isPredicate() property: an immediate
  // condition code followed by a CCR register operand.
  if (NumOpsRemaining >= 2 &&
      OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
      OpInfo[Idx].RegClass < 0 &&
      OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
  {
    // If we are inside an IT block, get the IT condition bits maintained via
    // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
    // See also A2.5.2.
    if (InITBlock())
      MI.addOperand(MCOperand::CreateImm(GetITCond()));
    else
      MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
    MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
    return true;
  }

  return false;
}
/// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
/// the possible Predicate and SBitModifier, to build the remaining MCOperand
/// constituents. Returns true only if all remaining operands are consumed.
bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOpsRemaining) {

  assert(NumOpsRemaining > 0 && "Invalid argument");

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  // Instruction name string is used below to special-case Thumb branches.
  const std::string &Name = ARMInsts[Opcode].Name;
  unsigned Idx = MI.getNumOperands();
  uint64_t TSFlags = ARMInsts[Opcode].TSFlags;

  // First, we check whether this instr specifies the PredicateOperand through
  // a pair of MCOperandInfos with isPredicate() property.
  if (NumOpsRemaining >= 2 &&
      OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
      OpInfo[Idx].RegClass < 0 &&
      OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
  {
    // If we are inside an IT block, get the IT condition bits maintained via
    // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
    // See also A2.5.2.
    if (InITBlock())
      MI.addOperand(MCOperand::CreateImm(GetITCond()));
    else {
      if (Name.length() > 1 && Name[0] == 't') {
        // Thumb conditional branch instructions have their cond field embedded,
        // like ARM.
        //
        // A8.6.16 B
        // Check for undefined encodings.
        unsigned cond;
        if (Name == "t2Bcc") {
          // Thumb2 Bcc: cond is Inst{25-22}; 14/15 are undefined encodings.
          if ((cond = slice(insn, 25, 22)) >= 14)
            return false;
          MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
        } else if (Name == "tBcc") {
          // Thumb Bcc: cond is Inst{11-8}; 14 is an undefined encoding.
          if ((cond = slice(insn, 11, 8)) == 14)
            return false;
          MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
        } else
          MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
      } else {
        // ARM instructions get their condition field from Inst{31-28}.
        // We should reject Inst{31-28} = 0b1111 as invalid encoding.
        if (!isNEONDomain(TSFlags) && getCondField(insn) == 0xF)
          return false;
        MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
      }
    }
    MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
    Idx += 2;
    NumOpsRemaining -= 2;
  }

  if (NumOpsRemaining == 0)
    return true;

  // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
  if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
    MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
    --NumOpsRemaining;
  }

  if (NumOpsRemaining == 0)
    return true;
  else
    return false;
}
/// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
/// after BuildIt is finished: it keeps the session's IT-block state (held by
/// the session pointer SP) up to date. Returns the possibly-updated Status.
bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
    uint32_t insn) {

  // Without a session there is no IT state to maintain.
  if (!SP) return Status;

  if (Opcode == ARM::t2IT)
    // A new IT instruction: initialize IT state from its low byte; a failed
    // init forces the overall status to false.
    Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
  else if (InITBlock())
    // Any other instruction inside an IT block advances the IT state.
    SP->UpdateIT();

  return Status;
}
/// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
/// The format value doubles as an index into FuncPtrs to select the
/// format-specific disassemble function (the table's trailing NULL sentinel
/// is excluded from the valid range, hence array_lengthof() - 1).
ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
                                     unsigned short num)
  : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
  unsigned Idx = (unsigned)format;
  assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
  Disasm = FuncPtrs[Idx];
}
  3219. /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
  3220. /// infrastructure of an MCInst given the Opcode and Format of the instr.
  3221. /// Return NULL if it fails to create/return a proper builder. API clients
  3222. /// are responsible for freeing up of the allocated memory. Cacheing can be
  3223. /// performed by the API clients to improve performance.
  3224. ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
  3225. // For "Unknown format", fail by returning a NULL pointer.
  3226. if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
  3227. DEBUG(errs() << "Unknown format\n");
  3228. return 0;
  3229. }
  3230. return new ARMBasicMCBuilder(Opcode, Format,
  3231. ARMInsts[Opcode].getNumOperands());
  3232. }
  3233. /// tryAddingSymbolicOperand - tryAddingSymbolicOperand trys to add a symbolic
  3234. /// operand in place of the immediate Value in the MCInst. The immediate
  3235. /// Value has had any PC adjustment made by the caller. If the getOpInfo()
  3236. /// function was set as part of the setupBuilderForSymbolicDisassembly() call
  3237. /// then that function is called to get any symbolic information at the
  3238. /// builder's Address for this instrution. If that returns non-zero then the
  3239. /// symbolic information it returns is used to create an MCExpr and that is
  3240. /// added as an operand to the MCInst. This function returns true if it adds
  3241. /// an operand to the MCInst and false otherwise.
  3242. bool ARMBasicMCBuilder::tryAddingSymbolicOperand(uint64_t Value,
  3243. uint64_t InstSize,
  3244. MCInst &MI) {
  3245. if (!GetOpInfo)
  3246. return false;
  3247. struct LLVMOpInfo1 SymbolicOp;
  3248. SymbolicOp.Value = Value;
  3249. if (!GetOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp))
  3250. return false;
  3251. const MCExpr *Add = NULL;
  3252. if (SymbolicOp.AddSymbol.Present) {
  3253. if (SymbolicOp.AddSymbol.Name) {
  3254. StringRef Name(SymbolicOp.AddSymbol.Name);
  3255. MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
  3256. Add = MCSymbolRefExpr::Create(Sym, *Ctx);
  3257. } else {
  3258. Add = MCConstantExpr::Create(SymbolicOp.AddSymbol.Value, *Ctx);
  3259. }
  3260. }
  3261. const MCExpr *Sub = NULL;
  3262. if (SymbolicOp.SubtractSymbol.Present) {
  3263. if (SymbolicOp.SubtractSymbol.Name) {
  3264. StringRef Name(SymbolicOp.SubtractSymbol.Name);
  3265. MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
  3266. Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
  3267. } else {
  3268. Sub = MCConstantExpr::Create(SymbolicOp.SubtractSymbol.Value, *Ctx);
  3269. }
  3270. }
  3271. const MCExpr *Off = NULL;
  3272. if (SymbolicOp.Value != 0)
  3273. Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
  3274. const MCExpr *Expr;
  3275. if (Sub) {
  3276. const MCExpr *LHS;
  3277. if (Add)
  3278. LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
  3279. else
  3280. LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
  3281. if (Off != 0)
  3282. Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
  3283. else
  3284. Expr = LHS;
  3285. } else if (Add) {
  3286. if (Off != 0)
  3287. Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
  3288. else
  3289. Expr = Add;
  3290. } else {
  3291. if (Off != 0)
  3292. Expr = Off;
  3293. else
  3294. Expr = MCConstantExpr::Create(0, *Ctx);
  3295. }
  3296. if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_HI16)
  3297. MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateUpper16(Expr, *Ctx)));
  3298. else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_LO16)
  3299. MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateLower16(Expr, *Ctx)));
  3300. else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_None)
  3301. MI.addOperand(MCOperand::CreateExpr(Expr));
  3302. else
  3303. assert("bad SymbolicOp.VariantKind");
  3304. return true;
  3305. }