//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumVLDMGened, "Number of vldm instructions generated");
STATISTIC(NumVSTMGened, "Number of vstm instructions generated");
STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
STATISTIC(NumLDRD2LDM, "Number of ldrd instructions turned back into ldm");
STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
/// ARMLoadStoreOpt - Post-register-allocation pass that combines
/// load / store instructions to form ldm / stm instructions.
namespace {
  struct ARMLoadStoreOpt : public MachineFunctionPass {
    static char ID;
    ARMLoadStoreOpt() : MachineFunctionPass(ID) {}

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ARMFunctionInfo *AFI;
    RegScavenger *RS;
    bool isThumb2;

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM load / store optimization pass";
    }

  private:
    struct MemOpQueueEntry {
      int Offset;
      unsigned Reg;
      bool isKill;
      unsigned Position;
      MachineBasicBlock::iterator MBBI;
      bool Merged;
      MemOpQueueEntry(int o, unsigned r, bool k, unsigned p,
                      MachineBasicBlock::iterator i)
        : Offset(o), Reg(r), isKill(k), Position(p), MBBI(i), Merged(false) {}
    };
    typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
    typedef MemOpQueue::iterator MemOpQueueIter;

    bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                  int Offset, unsigned Base, bool BaseKill, int Opcode,
                  ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
                  DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
    void MergeOpsUpdate(MachineBasicBlock &MBB,
                        MemOpQueue &MemOps,
                        unsigned memOpsBegin,
                        unsigned memOpsEnd,
                        unsigned insertAfter,
                        int Offset,
                        unsigned Base,
                        bool BaseKill,
                        int Opcode,
                        ARMCC::CondCodes Pred,
                        unsigned PredReg,
                        unsigned Scratch,
                        DebugLoc dl,
                        SmallVector<MachineBasicBlock::iterator, 4> &Merges);
    void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                      int Opcode, unsigned Size,
                      ARMCC::CondCodes Pred, unsigned PredReg,
                      unsigned Scratch, MemOpQueue &MemOps,
                      SmallVector<MachineBasicBlock::iterator, 4> &Merges);

    void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
    bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MBBI);
    bool MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  const TargetInstrInfo *TII,
                                  bool &Advance,
                                  MachineBasicBlock::iterator &I);
    bool MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   bool &Advance,
                                   MachineBasicBlock::iterator &I);
    bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
    bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
  };
  char ARMLoadStoreOpt::ID = 0;
}
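// Added editorial note (illustrative, not from the original source): the four
// load/store-multiple submodes below differ only in where the transferred
// words sit relative to the base register. For "ldm r0, {r1, r2}":
//   ia (increment after):  r1 <- [r0],   r2 <- [r0+4]
//   ib (increment before): r1 <- [r0+4], r2 <- [r0+8]
//   da (decrement after):  r1 <- [r0-4], r2 <- [r0]
//   db (decrement before): r1 <- [r0-8], r2 <- [r0-4]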
static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
  switch (Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDRi12:
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::LDMIA;
    case ARM_AM::da: return ARM::LDMDA;
    case ARM_AM::db: return ARM::LDMDB;
    case ARM_AM::ib: return ARM::LDMIB;
    }
    break;
  case ARM::STRi12:
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::STMIA;
    case ARM_AM::da: return ARM::STMDA;
    case ARM_AM::db: return ARM::STMDB;
    case ARM_AM::ib: return ARM::STMIB;
    }
    break;
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2LDMIA;
    case ARM_AM::db: return ARM::t2LDMDB;
    }
    break;
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2STMIA;
    case ARM_AM::db: return ARM::t2STMDB;
    }
    break;
  case ARM::VLDRS:
    ++NumVLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMSIA;
    case ARM_AM::db: return 0; // Only VLDMSDB_UPD exists.
    }
    break;
  case ARM::VSTRS:
    ++NumVSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMSIA;
    case ARM_AM::db: return 0; // Only VSTMSDB_UPD exists.
    }
    break;
  case ARM::VLDRD:
    ++NumVLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMDIA;
    case ARM_AM::db: return 0; // Only VLDMDDB_UPD exists.
    }
    break;
  case ARM::VSTRD:
    ++NumVSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMDIA;
    case ARM_AM::db: return 0; // Only VSTMDDB_UPD exists.
    }
    break;
  }
  return 0;
}
namespace llvm {
namespace ARM_AM {

AMSubMode getLoadStoreMultipleSubMode(int Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMIA_UPD:
  case ARM::STMIA:
  case ARM::STMIA_UPD:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMIA_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
    return ARM_AM::ia;

  case ARM::LDMDA:
  case ARM::LDMDA_UPD:
  case ARM::STMDA:
  case ARM::STMDA_UPD:
    return ARM_AM::da;

  case ARM::LDMDB:
  case ARM::LDMDB_UPD:
  case ARM::STMDB:
  case ARM::STMDB_UPD:
  case ARM::t2LDMDB:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMDB:
  case ARM::t2STMDB_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMSDB_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VSTMDDB_UPD:
    return ARM_AM::db;

  case ARM::LDMIB:
  case ARM::LDMIB_UPD:
  case ARM::STMIB:
  case ARM::STMIB_UPD:
    return ARM_AM::ib;
  }
  return ARM_AM::bad_am_submode;
}

} // end namespace ARM_AM
} // end namespace llvm
static bool isT2i32Load(unsigned Opc) {
  return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
}

static bool isi32Load(unsigned Opc) {
  return Opc == ARM::LDRi12 || isT2i32Load(Opc);
}

static bool isT2i32Store(unsigned Opc) {
  return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
}

static bool isi32Store(unsigned Opc) {
  return Opc == ARM::STRi12 || isT2i32Store(Opc);
}
/// MergeOps - Create and insert a LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// It returns true if the transformation is done.
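/// Added editorial example (illustrative): a run such as
///   ldr r0, [r4]
///   ldr r1, [r4, #4]
///   ldr r2, [r4, #8]
/// can be rewritten as "ldmia r4, {r0, r1, r2}" when the destination
/// registers are in ascending order and the offsets are consecutive words.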
bool
ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          int Offset, unsigned Base, bool BaseKill,
                          int Opcode, ARMCC::CondCodes Pred,
                          unsigned PredReg, unsigned Scratch, DebugLoc dl,
                          SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  if (NumRegs <= 1)
    return false;

  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  // VFP and Thumb2 do not support IB or DA modes.
  bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
  bool haveIBAndDA = isNotVFP && !isThumb2;
  if (Offset == 4 && haveIBAndDA)
    Mode = ARM_AM::ib;
  else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA)
    Mode = ARM_AM::da;
  else if (Offset == -4 * (int)NumRegs && isNotVFP)
    // VLDM/VSTM do not support DB mode without also updating the base reg.
    Mode = ARM_AM::db;
  else if (Offset != 0) {
    // Check if this is a supported opcode before we insert instructions to
    // calculate a new base register.
    if (!getLoadStoreMultipleOpcode(Opcode, Mode)) return false;

    // If the starting offset isn't zero, insert a MI to materialize a new
    // base. But only do so if it is cost effective, i.e. merging more than
    // two loads / stores.
    if (NumRegs <= 2)
      return false;

    unsigned NewBase;
    if (isi32Load(Opcode))
      // If it is a load, then just use one of the destination registers
      // as the new base.
      NewBase = Regs[NumRegs-1].first;
    else {
      // Use the scratch register as the new base.
      NewBase = Scratch;
      if (NewBase == 0)
        return false;
    }
    int BaseOpc = !isThumb2
      ? ARM::ADDri
      : ((Base == ARM::SP) ? ARM::t2ADDrSPi : ARM::t2ADDri);
    if (Offset < 0) {
      BaseOpc = !isThumb2
        ? ARM::SUBri
        : ((Base == ARM::SP) ? ARM::t2SUBrSPi : ARM::t2SUBri);
      Offset = - Offset;
    }
    int ImmedOffset = isThumb2
      ? ARM_AM::getT2SOImmVal(Offset) : ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      // FIXME: Try t2ADDri12 or t2SUBri12?
      return false; // Probably not worth it then.

    BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
      .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
      .addImm(Pred).addReg(PredReg).addReg(0);
    Base = NewBase;
    BaseKill = true;  // New base is always killed right after its use.
  }

  bool isDef = (isi32Load(Opcode) || Opcode == ARM::VLDRS ||
                Opcode == ARM::VLDRD);
  Opcode = getLoadStoreMultipleOpcode(Opcode, Mode);
  if (!Opcode) return false;

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(Opcode))
    .addReg(Base, getKillRegState(BaseKill))
    .addImm(Pred).addReg(PredReg);
  for (unsigned i = 0; i != NumRegs; ++i)
    MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
                     | getKillRegState(Regs[i].second));

  return true;
}
// MergeOpsUpdate - call MergeOps and update MemOps and merges accordingly on
// success.
void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
                                     MemOpQueue &memOps,
                                     unsigned memOpsBegin, unsigned memOpsEnd,
                                     unsigned insertAfter, int Offset,
                                     unsigned Base, bool BaseKill,
                                     int Opcode,
                                     ARMCC::CondCodes Pred, unsigned PredReg,
                                     unsigned Scratch,
                                     DebugLoc dl,
                          SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
  // First calculate which of the registers should be killed by the merged
  // instruction.
  const unsigned insertPos = memOps[insertAfter].Position;
  SmallSet<unsigned, 4> KilledRegs;
  DenseMap<unsigned, unsigned> Killer;
  for (unsigned i = 0, e = memOps.size(); i != e; ++i) {
    if (i == memOpsBegin) {
      i = memOpsEnd;
      if (i == e)
        break;
    }
    if (memOps[i].Position < insertPos && memOps[i].isKill) {
      unsigned Reg = memOps[i].Reg;
      KilledRegs.insert(Reg);
      Killer[Reg] = i;
    }
  }

  SmallVector<std::pair<unsigned, bool>, 8> Regs;
  for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
    unsigned Reg = memOps[i].Reg;
    // If we are inserting the merged operation after an operation that
    // uses the same register, make sure to transfer any kill flag.
    bool isKill = memOps[i].isKill || KilledRegs.count(Reg);
    Regs.push_back(std::make_pair(Reg, isKill));
  }

  // Try to do the merge.
  MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI;
  ++Loc;
  if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode,
                Pred, PredReg, Scratch, dl, Regs))
    return;

  // Merge succeeded, update records.
  Merges.push_back(prior(Loc));
  for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
    // Remove kill flags from any memops that come before insertPos.
    if (Regs[i-memOpsBegin].second) {
      unsigned Reg = Regs[i-memOpsBegin].first;
      if (KilledRegs.count(Reg)) {
        unsigned j = Killer[Reg];
        int Idx = memOps[j].MBBI->findRegisterUseOperandIdx(Reg, true);
        assert(Idx >= 0 && "Cannot find killing operand");
        memOps[j].MBBI->getOperand(Idx).setIsKill(false);
        memOps[j].isKill = false;
      }
      memOps[i].isKill = true;
    }
    MBB.erase(memOps[i].MBBI);
    // Update this memop to refer to the merged instruction.
    // We may need to move kill flags again.
    memOps[i].Merged = true;
    memOps[i].MBBI = Merges.back();
    memOps[i].Position = insertPos;
  }
}
/// MergeLDR_STR - Merge a number of load / store instructions into one or more
/// load / store multiple instructions.
void
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
                              unsigned Base, int Opcode, unsigned Size,
                              ARMCC::CondCodes Pred, unsigned PredReg,
                              unsigned Scratch, MemOpQueue &MemOps,
                          SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
  bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;
  unsigned insertAfter = SIndex;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  DebugLoc dl = Loc->getDebugLoc();
  const MachineOperand &PMO = Loc->getOperand(0);
  unsigned PReg = PMO.getReg();
  unsigned PRegNum = PMO.isUndef() ? UINT_MAX
    : getARMRegisterNumbering(PReg);
  unsigned Count = 1;
  unsigned Limit = ~0U;

  // vldm / vstm limits are 32 for S variants, 16 for D variants.
  switch (Opcode) {
  default: break;
  case ARM::VSTRS:
    Limit = 32;
    break;
  case ARM::VSTRD:
    Limit = 16;
    break;
  case ARM::VLDRD:
    Limit = 16;
    break;
  case ARM::VLDRS:
    Limit = 32;
    break;
  }

  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    const MachineOperand &MO = MemOps[i].MBBI->getOperand(0);
    unsigned Reg = MO.getReg();
    unsigned RegNum = MO.isUndef() ? UINT_MAX
      : getARMRegisterNumbering(Reg);
    // Register numbers must be in ascending order. For VFP / NEON load and
    // store multiples, the registers must also be consecutive and within the
    // limit on the number of registers per instruction.
    if (Reg != ARM::SP &&
        NewOffset == Offset + (int)Size &&
        ((isNotVFP && RegNum > PRegNum) ||
         ((Count < Limit) && RegNum == PRegNum+1))) {
      Offset += Size;
      PRegNum = RegNum;
      ++Count;
    } else {
      // Can't merge this in. Try to merge the earlier ones first.
      MergeOpsUpdate(MBB, MemOps, SIndex, i, insertAfter, SOffset,
                     Base, false, Opcode, Pred, PredReg, Scratch, dl, Merges);
      MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch,
                   MemOps, Merges);
      return;
    }

    if (MemOps[i].Position > MemOps[insertAfter].Position)
      insertAfter = i;
  }

  bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
  MergeOpsUpdate(MBB, MemOps, SIndex, MemOps.size(), insertAfter, SOffset,
                 Base, BaseKill, Opcode, Pred, PredReg, Scratch, dl, Merges);
  return;
}
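// Added editorial note (illustrative): isMatchingDecrement/isMatchingIncrement
// recognize a plain add/sub of the base register by exactly Bytes under the
// same predicate, e.g. "sub r4, r4, #12" matches Base = r4, Bytes = 12. These
// are the instructions that can later be folded into a writeback form.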
static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, unsigned Limit,
                                       ARMCC::CondCodes Pred, unsigned PredReg){
  unsigned MyPredReg = 0;
  if (!MI)
    return false;
  if (MI->getOpcode() != ARM::t2SUBri &&
      MI->getOpcode() != ARM::t2SUBrSPi &&
      MI->getOpcode() != ARM::t2SUBrSPi12 &&
      MI->getOpcode() != ARM::tSUBspi &&
      MI->getOpcode() != ARM::SUBri)
    return false;

  // Make sure the offset fits in 8 bits.
  if (Bytes == 0 || (Limit && Bytes >= Limit))
    return false;

  unsigned Scale = (MI->getOpcode() == ARM::tSUBspi) ? 4 : 1; // FIXME
  return (MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          (MI->getOperand(2).getImm()*Scale) == Bytes &&
          llvm::getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
}

static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, unsigned Limit,
                                       ARMCC::CondCodes Pred, unsigned PredReg){
  unsigned MyPredReg = 0;
  if (!MI)
    return false;
  if (MI->getOpcode() != ARM::t2ADDri &&
      MI->getOpcode() != ARM::t2ADDrSPi &&
      MI->getOpcode() != ARM::t2ADDrSPi12 &&
      MI->getOpcode() != ARM::tADDspi &&
      MI->getOpcode() != ARM::ADDri)
    return false;

  // Make sure the offset fits in 8 bits.
  if (Bytes == 0 || (Limit && Bytes >= Limit))
    return false;

  unsigned Scale = (MI->getOpcode() == ARM::tADDspi) ? 4 : 1; // FIXME
  return (MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          (MI->getOperand(2).getImm()*Scale) == Bytes &&
          llvm::getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
}
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return 0;
  case ARM::LDRi12:
  case ARM::STRi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
  case ARM::t2STRi8:
  case ARM::t2STRi12:
  case ARM::VLDRS:
  case ARM::VSTRS:
    return 4;
  case ARM::VLDRD:
  case ARM::VSTRD:
    return 8;
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::VLDMSIA:
  case ARM::VSTMSIA:
    return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 4;
  case ARM::VLDMDIA:
  case ARM::VSTMDIA:
    return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 8;
  }
}
static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
                                            ARM_AM::AMSubMode Mode) {
  switch (Opc) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::LDMIA_UPD;
    case ARM_AM::ib: return ARM::LDMIB_UPD;
    case ARM_AM::da: return ARM::LDMDA_UPD;
    case ARM_AM::db: return ARM::LDMDB_UPD;
    }
    break;
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::STMIA_UPD;
    case ARM_AM::ib: return ARM::STMIB_UPD;
    case ARM_AM::da: return ARM::STMDA_UPD;
    case ARM_AM::db: return ARM::STMDB_UPD;
    }
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2LDMIA_UPD;
    case ARM_AM::db: return ARM::t2LDMDB_UPD;
    }
    break;
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2STMIA_UPD;
    case ARM_AM::db: return ARM::t2STMDB_UPD;
    }
    break;
  case ARM::VLDMSIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMSIA_UPD;
    case ARM_AM::db: return ARM::VLDMSDB_UPD;
    }
    break;
  case ARM::VLDMDIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMDIA_UPD;
    case ARM_AM::db: return ARM::VLDMDDB_UPD;
    }
    break;
  case ARM::VSTMSIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMSIA_UPD;
    case ARM_AM::db: return ARM::VSTMSDB_UPD;
    }
    break;
  case ARM::VSTMDIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMDIA_UPD;
    case ARM_AM::db: return ARM::VSTMDDB_UPD;
    }
    break;
  }
  return 0;
}
/// MergeBaseUpdateLSMultiple - Fold preceding/trailing inc/dec of base
/// register into the LDM/STM/VLDM{D|S}/VSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               bool &Advance,
                                               MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(0).getReg();
  bool BaseKill = MI->getOperand(0).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
  int Opcode = MI->getOpcode();
  DebugLoc dl = MI->getDebugLoc();

  // Can't use an updating ld/st if the base register is also a dest
  // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
  for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
    if (MI->getOperand(i).getReg() == Base)
      return false;

  bool DoMerge = false;
  ARM_AM::AMSubMode Mode = ARM_AM::getLoadStoreMultipleSubMode(Opcode);

  // Try merging with the previous instruction.
  MachineBasicBlock::iterator BeginMBBI = MBB.begin();
  if (MBBI != BeginMBBI) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
      --PrevMBBI;
    if (Mode == ARM_AM::ia &&
        isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
      Mode = ARM_AM::db;
      DoMerge = true;
    } else if (Mode == ARM_AM::ib &&
               isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
      Mode = ARM_AM::da;
      DoMerge = true;
    }
    if (DoMerge)
      MBB.erase(PrevMBBI);
  }

  // Try merging with the next instruction.
  MachineBasicBlock::iterator EndMBBI = MBB.end();
  if (!DoMerge && MBBI != EndMBBI) {
    MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
    while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
      ++NextMBBI;
    if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
        isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
      DoMerge = true;
    } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
               isMatchingDecrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
      DoMerge = true;
    }
    if (DoMerge) {
      if (NextMBBI == I) {
        Advance = true;
        ++I;
      }
      MBB.erase(NextMBBI);
    }
  }

  if (!DoMerge)
    return false;

  unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode, Mode);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
    .addReg(Base, getDefRegState(true)) // WB base register
    .addReg(Base, getKillRegState(BaseKill))
    .addImm(Pred).addReg(PredReg);

  // Transfer the rest of the operands.
  for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  MBB.erase(MBBI);
  return true;
}
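// Added editorial note (illustrative, not from the original source): a
// pre-indexed form updates the base before the access, a post-indexed form
// after it, e.g.
//   ldr r0, [r1, #4]!   ; r1 += 4, then r0 <- [r1]   (pre-indexed)
//   ldr r0, [r1], #4    ; r0 <- [r1], then r1 += 4   (post-indexed)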
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc,
                                             ARM_AM::AddrOpc Mode) {
  switch (Opc) {
  case ARM::LDRi12:
    return ARM::LDR_PRE;
  case ARM::STRi12:
    return ARM::STR_PRE;
  case ARM::VLDRS:
    return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
  case ARM::VLDRD:
    return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
  case ARM::VSTRS:
    return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
  case ARM::VSTRD:
    return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    return ARM::t2LDR_PRE;
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    return ARM::t2STR_PRE;
  default: llvm_unreachable("Unhandled opcode!");
  }
  return 0;
}
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
                                              ARM_AM::AddrOpc Mode) {
  switch (Opc) {
  case ARM::LDRi12:
    return ARM::LDR_POST;
  case ARM::STRi12:
    return ARM::STR_POST;
  case ARM::VLDRS:
    return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
  case ARM::VLDRD:
    return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
  case ARM::VSTRS:
    return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
  case ARM::VSTRD:
    return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    return ARM::t2LDR_POST;
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    return ARM::t2STR_POST;
  default: llvm_unreachable("Unhandled opcode!");
  }
  return 0;
}
/// MergeBaseUpdateLoadStore - Fold preceding/trailing inc/dec of base
/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               const TargetInstrInfo *TII,
                                               bool &Advance,
                                               MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  bool BaseKill = MI->getOperand(1).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  DebugLoc dl = MI->getDebugLoc();
  bool isAM5 = (Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
                Opcode == ARM::VSTRD || Opcode == ARM::VSTRS);
  bool isAM2 = (Opcode == ARM::LDRi12 || Opcode == ARM::STRi12);
  if (isi32Load(Opcode) || isi32Store(Opcode))
    if (MI->getOperand(2).getImm() != 0)
      return false;
  if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
    return false;

  bool isLd = isi32Load(Opcode) || Opcode == ARM::VLDRS || Opcode == ARM::VLDRD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
    return false;

  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  unsigned NewOpc = 0;
  // AM2 - 12 bits, thumb2 - 8 bits.
  unsigned Limit = isAM5 ? 0 : (isAM2 ? 0x1000 : 0x100);

  // Try merging with the previous instruction.
  MachineBasicBlock::iterator BeginMBBI = MBB.begin();
  if (MBBI != BeginMBBI) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
      --PrevMBBI;
    if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
    } else if (!isAM5 &&
               isMatchingIncrement(PrevMBBI, Base, Bytes, Limit,Pred,PredReg)) {
      DoMerge = true;
    }
    if (DoMerge) {
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode, AddSub);
      MBB.erase(PrevMBBI);
    }
  }

  // Try merging with the next instruction.
  MachineBasicBlock::iterator EndMBBI = MBB.end();
  if (!DoMerge && MBBI != EndMBBI) {
    MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
    while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
      ++NextMBBI;
    if (!isAM5 &&
        isMatchingDecrement(NextMBBI, Base, Bytes, Limit, Pred, PredReg)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Limit,Pred,PredReg)) {
      DoMerge = true;
    }
    if (DoMerge) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode, AddSub);
      if (NextMBBI == I) {
        Advance = true;
        ++I;
      }
      MBB.erase(NextMBBI);
    }
  }

  if (!DoMerge)
    return false;

  unsigned Offset = 0;
  if (isAM2)
    Offset = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
  else if (!isAM5)
    Offset = AddSub == ARM_AM::sub ? -Bytes : Bytes;

  if (isAM5) {
    // VLDM[SD]_UPD, VSTM[SD]_UPD
    // (There are no base-updating versions of VLDR/VSTR instructions, but the
    // updating load/store-multiple instructions can be used with only one
    // register.)
    MachineOperand &MO = MI->getOperand(0);
    BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
      .addReg(Base, getDefRegState(true)) // WB base register
      .addReg(Base, getKillRegState(isLd ? BaseKill : false))
      .addImm(Pred).addReg(PredReg)
      .addReg(MO.getReg(), (isLd ? getDefRegState(true) :
                            getKillRegState(MO.isKill())));
  } else if (isLd) {
    if (isAM2)
      // LDR_PRE, LDR_POST,
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, RegState::Define)
        .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
    else
      // t2LDR_PRE, t2LDR_POST
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, RegState::Define)
        .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
  } else {
    MachineOperand &MO = MI->getOperand(0);
    if (isAM2)
      // STR_PRE, STR_POST
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
    else
      // t2STR_PRE, t2STR_POST
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
  }
  MBB.erase(MBBI);

  return true;
}
/// isMemoryOp - Returns true if instruction is a memory operation that this
/// pass is capable of operating on.
static bool isMemoryOp(const MachineInstr *MI) {
  // When no memory operands are present, conservatively assume unaligned,
  // volatile, unfoldable.
  if (!MI->hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI->memoperands_begin();

  // Don't touch volatile memory accesses - we may be changing their order.
  if (MMO->isVolatile())
    return false;

  // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
  // not.
  if (MMO->getAlignment() < 4)
    return false;

  // str <undef> could probably be eliminated entirely, but for now we just want
  // to avoid making a mess of it.
  // FIXME: Use str <undef> as a wildcard to enable better stm folding.
  if (MI->getNumOperands() > 0 && MI->getOperand(0).isReg() &&
      MI->getOperand(0).isUndef())
    return false;

  // Likewise don't mess with references to undefined addresses.
  if (MI->getNumOperands() > 1 && MI->getOperand(1).isReg() &&
      MI->getOperand(1).isUndef())
    return false;

  int Opcode = MI->getOpcode();
  switch (Opcode) {
  default: break;
  case ARM::VLDRS:
  case ARM::VSTRS:
    return MI->getOperand(1).isReg();
  case ARM::VLDRD:
  case ARM::VSTRD:
    return MI->getOperand(1).isReg();
  case ARM::LDRi12:
  case ARM::STRi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    return MI->getOperand(1).isReg();
  }
  return false;
}
/// AdvanceRS - Advance register scavenger to just before the earliest memory
/// op that is being merged.
void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
  MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
  unsigned Position = MemOps[0].Position;
  for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
    if (MemOps[i].Position < Position) {
      Position = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  if (Loc != MBB.begin())
    RS->forward(prior(Loc));
}
static int getMemoryOpOffset(const MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
  unsigned NumOperands = MI->getDesc().getNumOperands();
  unsigned OffField = MI->getOperand(NumOperands-3).getImm();

  if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
      Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
      Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8 ||
      Opcode == ARM::LDRi12   || Opcode == ARM::STRi12)
    return OffField;

  int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
    : ARM_AM::getAM5Offset(OffField) * 4;
  if (isAM3) {
    if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  } else {
    if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  }
  return Offset;
}
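// Added editorial note (an assumption about the immediate encodings; see
// ARMAddressingModes.h for the authoritative definitions): AM3 packs a byte
// offset plus an add/sub bit, while AM5 stores a word count plus an add/sub
// bit, so e.g. an AM5 immediate of 3 with a "sub" op decodes to -12 bytes.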
static void InsertLDR_STR(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          int Offset, bool isDef,
                          DebugLoc dl, unsigned NewOpc,
                          unsigned Reg, bool RegDeadKill, bool RegUndef,
                          unsigned BaseReg, bool BaseKill, bool BaseUndef,
                          bool OffKill, bool OffUndef,
                          ARMCC::CondCodes Pred, unsigned PredReg,
                          const TargetInstrInfo *TII, bool isT2) {
  if (isDef) {
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                      TII->get(NewOpc))
      .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
      .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
    MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
  } else {
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                      TII->get(NewOpc))
      .addReg(Reg, getKillRegState(RegDeadKill) | getUndefRegState(RegUndef))
      .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
    MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
  }
}
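// Added editorial note (illustrative): ARM LDRD/STRD require an even/odd
// consecutive register pair such as {r2, r3}. FixInvalidRegPairOp below
// rewrites a pairing the encoding cannot express, e.g. an "ldrd" naming
// {r1, r2}, either as an ldm/stm or as two single loads / stores.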
bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = &*MBBI;
  unsigned Opcode = MI->getOpcode();
  if (Opcode == ARM::LDRD || Opcode == ARM::STRD ||
      Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) {
    unsigned EvenReg = MI->getOperand(0).getReg();
    unsigned OddReg  = MI->getOperand(1).getReg();
    unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
    unsigned OddRegNum  = TRI->getDwarfRegNum(OddReg, false);
    if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
      return false;

    MachineBasicBlock::iterator NewBBI = MBBI;
    bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
    bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
    bool EvenDeadKill = isLd ?
      MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
    bool EvenUndef = MI->getOperand(0).isUndef();
    bool OddDeadKill  = isLd ?
      MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
    bool OddUndef = MI->getOperand(1).isUndef();
    const MachineOperand &BaseOp = MI->getOperand(2);
    unsigned BaseReg = BaseOp.getReg();
    bool BaseKill = BaseOp.isKill();
    bool BaseUndef = BaseOp.isUndef();
    bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
    bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
    int OffImm = getMemoryOpOffset(MI);
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);

    if (OddRegNum > EvenRegNum && OffImm == 0) {
      // Ascending register numbers and no offset. It's safe to change it to an
      // ldm or stm.
      unsigned NewOpc = (isLd)
        ? (isT2 ? ARM::t2LDMIA : ARM::LDMIA)
        : (isT2 ? ARM::t2STMIA : ARM::STMIA);
      if (isLd) {
        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
          .addReg(BaseReg, getKillRegState(BaseKill))
          .addImm(Pred).addReg(PredReg)
          .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
          .addReg(OddReg,  getDefRegState(isLd) | getDeadRegState(OddDeadKill));
        ++NumLDRD2LDM;
      } else {
        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
          .addReg(BaseReg, getKillRegState(BaseKill))
          .addImm(Pred).addReg(PredReg)
          .addReg(EvenReg,
                  getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
          .addReg(OddReg,
                  getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
        ++NumSTRD2STM;
      }
      NewBBI = llvm::prior(MBBI);
    } else {
      // Split into two instructions.
      unsigned NewOpc = (isLd)
        ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
        : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
      DebugLoc dl = MBBI->getDebugLoc();
      // If this is a load and the base register is killed, it may have been
      // re-defed by the load; make sure the first load does not clobber it.
      if (isLd &&
          (BaseKill || OffKill) &&
          (TRI->regsOverlap(EvenReg, BaseReg))) {
        assert(!TRI->regsOverlap(OddReg, BaseReg));
        InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
                      OddReg, OddDeadKill, false,
                      BaseReg, false, BaseUndef, false, OffUndef,
                      Pred, PredReg, TII, isT2);
        NewBBI = llvm::prior(MBBI);
        InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
                      EvenReg, EvenDeadKill, false,
                      BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
                      Pred, PredReg, TII, isT2);
      } else {
        if (OddReg == EvenReg && EvenDeadKill) {
          // If the two source operands are the same, the kill marker is
          // probably on the first one. e.g.
          // t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
          EvenDeadKill = false;
          OddDeadKill = true;
        }
        InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
                      EvenReg, EvenDeadKill, EvenUndef,
                      BaseReg, false, BaseUndef, false, OffUndef,
                      Pred, PredReg, TII, isT2);
        NewBBI = llvm::prior(MBBI);
        InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
                      OddReg, OddDeadKill, OddUndef,
                      BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
                      Pred, PredReg, TII, isT2);
      }
      if (isLd)
        ++NumLDRD2LDR;
      else
        ++NumSTRD2STR;
    }

    MBB.erase(MI);
    MBBI = NewBBI;
    return true;
  }
  return false;
}
/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base and incrementing offset into LDM / STM ops.
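/// Added editorial example (illustrative): in
///   ldr r0, [r4]
///   ldr r1, [r4, #4]
///   str r2, [r5]
/// the two ldr's share base r4 and consecutive offsets, so they form one
/// mergeable chain; the str uses a different base and opcode and starts a
/// new chain.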
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  MemOpQueue MemOps;
  unsigned CurrBase = 0;
  int CurrOpc = -1;
  unsigned CurrSize = 0;
  ARMCC::CondCodes CurrPred = ARMCC::AL;
  unsigned CurrPredReg = 0;
  unsigned Position = 0;
  SmallVector<MachineBasicBlock::iterator,4> Merges;

  RS->enterBasicBlock(&MBB);
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    if (FixInvalidRegPairOp(MBB, MBBI))
      continue;

    bool Advance  = false;
    bool TryMerge = false;
    bool Clobber  = false;

    bool isMemOp = isMemoryOp(MBBI);
    if (isMemOp) {
      int Opcode = MBBI->getOpcode();
      unsigned Size = getLSMultipleTransferSize(MBBI);
      const MachineOperand &MO = MBBI->getOperand(0);
      unsigned Reg = MO.getReg();
      bool isKill = MO.isDef() ? false : MO.isKill();
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned PredReg = 0;
      ARMCC::CondCodes Pred = llvm::getInstrPredicate(MBBI, PredReg);
      int Offset = getMemoryOpOffset(MBBI);
      // Watch out for:
      // r4 := ldr [r5]
      // r5 := ldr [r5, #4]
      // r6 := ldr [r5, #8]
      //
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (isi32Load(Opcode) && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc  = Opcode;
        CurrSize = Size;
        CurrPred = Pred;
        CurrPredReg = PredReg;
        MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill, Position, MBBI));
        ++NumMemOps;
        Advance = true;
      } else {
        if (Clobber) {
          TryMerge = true;
          Advance = true;
        }

        if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
          // No need to match PredReg.
          // Continue adding to the queue.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill,
                                             Position, MBBI));
            ++NumMemOps;
            Advance = true;
          } else {
            for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
                 I != E; ++I) {
              if (Offset < I->Offset) {
                MemOps.insert(I, MemOpQueueEntry(Offset, Reg, isKill,
                                                 Position, MBBI));
                ++NumMemOps;
                Advance = true;
                break;
              } else if (Offset == I->Offset) {
                // Collision! This can't be merged!
                break;
              }
            }
          }
        }
      }
    }

    if (MBBI->isDebugValue()) {
      ++MBBI;
      if (MBBI == E)
        // Reached the end of the block, try merging the memory instructions.
        TryMerge = true;
    } else if (Advance) {
      ++Position;
      ++MBBI;
      if (MBBI == E)
        // Reached the end of the block, try merging the memory instructions.
        TryMerge = true;
    } else
      TryMerge = true;

    if (TryMerge) {
      if (NumMemOps > 1) {
        // Try to find a free register to use as a new base in case it's
        // needed. First advance to the instruction just before the start of
        // the chain.
        AdvanceRS(MBB, MemOps);
        // Find a scratch register.
        unsigned Scratch = RS->FindUnusedReg(ARM::GPRRegisterClass);
        // Process the load / store instructions.
        RS->forward(prior(MBBI));

        // Merge ops.
        Merges.clear();
        MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,
                     CurrPred, CurrPredReg, Scratch, MemOps, Merges);

        // Try folding preceding/trailing base inc/dec into the generated
        // LDM/STM ops.
        for (unsigned i = 0, e = Merges.size(); i < e; ++i)
          if (MergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))
            ++NumMerges;
        NumMerges += Merges.size();

        // Try folding preceding/trailing base inc/dec into those load/store
        // ops that were not merged to form LDM/STM ops.
        for (unsigned i = 0; i != NumMemOps; ++i)
          if (!MemOps[i].Merged)
            if (MergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
              ++NumMerges;

        // RS may be pointing to an instruction that's deleted.
        RS->skipTo(prior(MBBI));
      } else if (NumMemOps == 1) {
        // Try folding preceding/trailing base inc/dec into the single
        // load/store.
        if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
          ++NumMerges;
          RS->forward(prior(MBBI));
        }
      }

      CurrBase = 0;
      CurrOpc = -1;
      CurrSize = 0;
      CurrPred = ARMCC::AL;
      CurrPredReg = 0;
      if (NumMemOps) {
        MemOps.clear();
        NumMemOps = 0;
      }

      // If the iterator hasn't been advanced and this is not a memory op,
      // skip it. It can't start a new chain anyway.
      if (!Advance && !isMemOp && MBBI != E) {
        ++Position;
        ++MBBI;
      }
    }
  }
  return NumMerges > 0;
}
/// MergeReturnIntoLDM - If this is an exit BB, try merging the return ops
/// ("bx lr" and "mov pc, lr") into the preceding stack restore so it
/// directly restores the value of LR into pc.
///   ldmfd sp!, {..., lr}
///   bx lr
/// or
///   ldmfd sp!, {..., lr}
///   mov pc, lr
/// =>
///   ldmfd sp!, {..., pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  if (MBB.empty()) return false;

  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  if (MBBI != MBB.begin() &&
      (MBBI->getOpcode() == ARM::BX_RET ||
       MBBI->getOpcode() == ARM::tBX_RET ||
       MBBI->getOpcode() == ARM::MOVPCLR)) {
    MachineInstr *PrevMI = prior(MBBI);
    unsigned Opcode = PrevMI->getOpcode();
    if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
        Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
        Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
      if (MO.getReg() != ARM::LR)
        return false;
      unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
      assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
              Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
      PrevMI->setDesc(TII->get(NewOpc));
      MO.setReg(ARM::PC);
      PrevMI->copyImplicitOps(&*MBBI);
      MBB.erase(MBBI);
      return true;
    }
  }
  return false;
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  RS = new RegScavenger();
  isThumb2 = AFI->isThumb2Function();

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    if (TM.getSubtarget<ARMSubtarget>().hasV5TOps())
      Modified |= MergeReturnIntoLDM(MBB);
  }

  delete RS;
  return Modified;
}
/// ARMPreAllocLoadStoreOpt - Pre-register-allocation pass that moves
/// loads / stores from consecutive locations closer together to make it more
/// likely they will be combined later.
namespace {
  struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
    static char ID;
    ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}

    const TargetData *TD;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    const ARMSubtarget *STI;
    MachineRegisterInfo *MRI;
    MachineFunction *MF;

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM pre- register allocation load / store optimization pass";
    }

  private:
    bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
                          unsigned &NewOpc, unsigned &EvenReg,
                          unsigned &OddReg, unsigned &BaseReg,
                          int &Offset,
                          unsigned &PredReg, ARMCC::CondCodes &Pred,
                          bool &isT2);
    bool RescheduleOps(MachineBasicBlock *MBB,
                       SmallVector<MachineInstr*, 4> &Ops,
                       unsigned Base, bool isLd,
                       DenseMap<MachineInstr*, unsigned> &MI2LocMap);
    bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
  };
  char ARMPreAllocLoadStoreOpt::ID = 0;
}

bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  TD  = Fn.getTarget().getTargetData();
  TII = Fn.getTarget().getInstrInfo();
  TRI = Fn.getTarget().getRegisterInfo();
  STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
  MRI = &Fn.getRegInfo();
  MF  = &Fn;

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI)
    Modified |= RescheduleLoadStoreInstrs(MFI);

  return Modified;
}

static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
                                      MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator E,
                                      SmallPtrSet<MachineInstr*, 4> &MemOps,
                                      SmallSet<unsigned, 4> &MemRegs,
                                      const TargetRegisterInfo *TRI) {
  // Are there stores / loads / calls between them?
  // FIXME: This is overly conservative. We should make use of alias information
  // some day.
  SmallSet<unsigned, 4> AddedRegPressure;
  while (++I != E) {
    if (I->isDebugValue() || MemOps.count(&*I))
      continue;
    const MCInstrDesc &MCID = I->getDesc();
    if (MCID.isCall() || MCID.isTerminator() || I->hasUnmodeledSideEffects())
      return false;
    if (isLd && MCID.mayStore())
      return false;
    if (!isLd) {
      if (MCID.mayLoad())
        return false;
      // It's not safe to move the first 'str' down.
      // str r1, [r0]
      // strh r5, [r0]
      // str r4, [r0, #+4]
      if (MCID.mayStore())
        return false;
    }
    for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef() && TRI->regsOverlap(Reg, Base))
        return false;
      if (Reg != Base && !MemRegs.count(Reg))
        AddedRegPressure.insert(Reg);
    }
  }

  // Estimate register pressure increase due to the transformation.
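  // E.g. (illustrative numbers only): moving five memory ops that define five
  // registers is rejected if it would extend the live ranges of more than ten
  // other registers across the instructions being moved over.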
  if (MemRegs.size() <= 4)
    // OK if we are moving a small number of instructions.
    return true;
  return AddedRegPressure.size() <= MemRegs.size() * 2;
}
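
// A sketch of the rewrite that CanFormLdStDWord enables (hypothetical
// registers; the actual candidates are found by RescheduleOps below):
//   ldr r0, [r2]          =>   ldrd r0, r1, [r2]
//   ldr r1, [r2, #4]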

bool
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
                                          DebugLoc &dl,
                                          unsigned &NewOpc, unsigned &EvenReg,
                                          unsigned &OddReg, unsigned &BaseReg,
                                          int &Offset, unsigned &PredReg,
                                          ARMCC::CondCodes &Pred,
                                          bool &isT2) {
  // Make sure we're allowed to generate LDRD/STRD.
  if (!STI->hasV5TEOps())
    return false;

  // FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
  unsigned Scale = 1;
  unsigned Opcode = Op0->getOpcode();
  if (Opcode == ARM::LDRi12)
    NewOpc = ARM::LDRD;
  else if (Opcode == ARM::STRi12)
    NewOpc = ARM::STRD;
  else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
    NewOpc = ARM::t2LDRDi8;
    Scale = 4;
    isT2 = true;
  } else if (Opcode == ARM::t2STRi8 || Opcode == ARM::t2STRi12) {
    NewOpc = ARM::t2STRDi8;
    Scale = 4;
    isT2 = true;
  } else
    return false;

  // Make sure the base address satisfies i64 ld / st alignment requirement.
  if (!Op0->hasOneMemOperand() ||
      !(*Op0->memoperands_begin())->getValue() ||
      (*Op0->memoperands_begin())->isVolatile())
    return false;

  unsigned Align = (*Op0->memoperands_begin())->getAlignment();
  const Function *Func = MF->getFunction();
  unsigned ReqAlign = STI->hasV6Ops()
    ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext()))
    : 8;  // Pre-v6 needs 8-byte alignment.
  if (Align < ReqAlign)
    return false;

  // Then make sure the immediate offset fits.
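  // For t2LDRDi8 / t2STRDi8 the immediate is 8 bits scaled by 4, so the
  // reachable offsets are the multiples of 4 in (-1024, 1024); the ARM-mode
  // LDRD / STRD encoding below takes an unsigned 8-bit offset plus an
  // add/sub bit.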
  int OffImm = getMemoryOpOffset(Op0);
  if (isT2) {
    int Limit = (1 << 8) * Scale;
    if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
      return false;
    Offset = OffImm;
  } else {
    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (OffImm < 0) {
      AddSub = ARM_AM::sub;
      OffImm = -OffImm;
    }
    int Limit = (1 << 8) * Scale;
    if (OffImm >= Limit || (OffImm & (Scale-1)))
      return false;
    Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
  }
  EvenReg = Op0->getOperand(0).getReg();
  OddReg  = Op1->getOperand(0).getReg();
  if (EvenReg == OddReg)
    return false;
  BaseReg = Op0->getOperand(1).getReg();
  Pred = llvm::getInstrPredicate(Op0, PredReg);
  dl = Op0->getDebugLoc();
  return true;
}

namespace {
  struct OffsetCompare {
    bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
      int LOffset = getMemoryOpOffset(LHS);
      int ROffset = getMemoryOpOffset(RHS);
      assert(LHS == RHS || LOffset != ROffset);
      return LOffset > ROffset;
    }
  };
}

bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                 SmallVector<MachineInstr*, 4> &Ops,
                                 unsigned Base, bool isLd,
                                 DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
  bool RetVal = false;

  // Sort by offset (in reverse order).
  std::sort(Ops.begin(), Ops.end(), OffsetCompare());

  // The loads / stores of the same base are in order. Scan them from first to
  // last and check for the following:
  // 1. Any def of base.
  // 2. Any gaps.
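  // E.g. (hypothetical): for ldr r1, [r0] / ldr r2, [r0, #4] /
  // ldr r3, [r0, #12], only the first two form a run; the 4-byte hole before
  // #12 is a gap that ends the run.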
  while (Ops.size() > 1) {
    unsigned FirstLoc = ~0U;
    unsigned LastLoc = 0;
    MachineInstr *FirstOp = 0;
    MachineInstr *LastOp = 0;
    int LastOffset = 0;
    unsigned LastOpcode = 0;
    unsigned LastBytes = 0;
    unsigned NumMove = 0;
    for (int i = Ops.size() - 1; i >= 0; --i) {
      MachineInstr *Op = Ops[i];
      unsigned Loc = MI2LocMap[Op];
      if (Loc <= FirstLoc) {
        FirstLoc = Loc;
        FirstOp = Op;
      }
      if (Loc >= LastLoc) {
        LastLoc = Loc;
        LastOp = Op;
      }

      unsigned Opcode = Op->getOpcode();
      if (LastOpcode && Opcode != LastOpcode)
        break;

      int Offset = getMemoryOpOffset(Op);
      unsigned Bytes = getLSMultipleTransferSize(Op);
      if (LastBytes) {
        if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
          break;
      }
      LastOffset = Offset;
      LastBytes = Bytes;
      LastOpcode = Opcode;
      if (++NumMove == 8) // FIXME: Tune this limit.
        break;
    }

    if (NumMove <= 1)
      Ops.pop_back();
    else {
      SmallPtrSet<MachineInstr*, 4> MemOps;
      SmallSet<unsigned, 4> MemRegs;
      for (int i = NumMove-1; i >= 0; --i) {
        MemOps.insert(Ops[i]);
        MemRegs.insert(Ops[i]->getOperand(0).getReg());
      }

      // Be conservative, if the instructions are too far apart, don't
      // move them. We want to limit the increase of register pressure.
      bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
      if (DoMove)
        DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
                                           MemOps, MemRegs, TRI);
      if (!DoMove) {
        for (unsigned i = 0; i != NumMove; ++i)
          Ops.pop_back();
      } else {
        // This is the new location for the loads / stores.
        MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
        while (InsertPos != MBB->end()
               && (MemOps.count(InsertPos) || InsertPos->isDebugValue()))
          ++InsertPos;

        // If we are moving a pair of loads / stores, see if it makes sense
        // to try to allocate a pair of registers that can form register pairs.
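        // E.g. an ARM-mode LDRD needs an even/odd register pair (r0/r1,
        // r2/r3, ...), so the allocation hints added below steer the two
        // destination registers toward such a pair.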
        MachineInstr *Op0 = Ops.back();
        MachineInstr *Op1 = Ops[Ops.size()-2];
        unsigned EvenReg = 0, OddReg = 0;
        unsigned BaseReg = 0, PredReg = 0;
        ARMCC::CondCodes Pred = ARMCC::AL;
        bool isT2 = false;
        unsigned NewOpc = 0;
        int Offset = 0;
        DebugLoc dl;
        if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
                                             EvenReg, OddReg, BaseReg,
                                             Offset, PredReg, Pred, isT2)) {
          Ops.pop_back();
          Ops.pop_back();

          const MCInstrDesc &MCID = TII->get(NewOpc);
          const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
          MRI->constrainRegClass(EvenReg, TRC);
          MRI->constrainRegClass(OddReg, TRC);

          // Form the pair instruction.
          if (isLd) {
            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
              .addReg(EvenReg, RegState::Define)
              .addReg(OddReg, RegState::Define)
              .addReg(BaseReg);
            // FIXME: We're converting from LDRi12 to an insn that still
            // uses addrmode2, so we need an explicit offset reg. It should
            // always be reg0 since we're transforming LDRi12s.
            if (!isT2)
              MIB.addReg(0);
            MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
            ++NumLDRDFormed;
          } else {
            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
              .addReg(EvenReg)
              .addReg(OddReg)
              .addReg(BaseReg);
            // FIXME: We're converting from STRi12 to an insn that still
            // uses addrmode2, so we need an explicit offset reg. It should
            // always be reg0 since we're transforming STRi12s.
            if (!isT2)
              MIB.addReg(0);
            MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
            ++NumSTRDFormed;
          }
          MBB->erase(Op0);
          MBB->erase(Op1);

          // Add register allocation hints to form register pairs.
          MRI->setRegAllocationHint(EvenReg, ARMRI::RegPairEven, OddReg);
          MRI->setRegAllocationHint(OddReg,  ARMRI::RegPairOdd, EvenReg);
        } else {
          for (unsigned i = 0; i != NumMove; ++i) {
            MachineInstr *Op = Ops.back();
            Ops.pop_back();
            MBB->splice(InsertPos, MBB, Op);
          }
        }

        NumLdStMoved += NumMove;
        RetVal = true;
      }
    }
  }

  return RetVal;
}

bool
ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool RetVal = false;

  DenseMap<MachineInstr*, unsigned> MI2LocMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
  SmallVector<unsigned, 4> LdBases;
  SmallVector<unsigned, 4> StBases;

  unsigned Loc = 0;
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  while (MBBI != E) {
    for (; MBBI != E; ++MBBI) {
      MachineInstr *MI = MBBI;
      const MCInstrDesc &MCID = MI->getDesc();
      if (MCID.isCall() || MCID.isTerminator()) {
        // Stop at barriers.
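        // Loads / stores are never rescheduled across a call or terminator;
        // each barrier ends the current window and the outer loop starts a
        // fresh one after it.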
        ++MBBI;
        break;
      }

      if (!MI->isDebugValue())
        MI2LocMap[MI] = ++Loc;

      if (!isMemoryOp(MI))
        continue;
      unsigned PredReg = 0;
      if (llvm::getInstrPredicate(MI, PredReg) != ARMCC::AL)
        continue;

      int Opc = MI->getOpcode();
      bool isLd = isi32Load(Opc) || Opc == ARM::VLDRS || Opc == ARM::VLDRD;
      unsigned Base = MI->getOperand(1).getReg();
      int Offset = getMemoryOpOffset(MI);

      bool StopHere = false;
      if (isLd) {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2LdsMap.find(Base);
        if (BI != Base2LdsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(MI);
        } else {
          SmallVector<MachineInstr*, 4> MIs;
          MIs.push_back(MI);
          Base2LdsMap[Base] = MIs;
          LdBases.push_back(Base);
        }
      } else {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2StsMap.find(Base);
        if (BI != Base2StsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(MI);
        } else {
          SmallVector<MachineInstr*, 4> MIs;
          MIs.push_back(MI);
          Base2StsMap[Base] = MIs;
          StBases.push_back(Base);
        }
      }

      if (StopHere) {
        // Found a duplicate (a base+offset combination that's seen earlier).
        // Backtrack.
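        // E.g. a second "ldr r1, [r0, #4]" in the same window would break
        // OffsetCompare's distinct-offset assumption, so the window is cut
        // here and the duplicate starts a new one.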
        --Loc;
        break;
      }
    }

    // Re-schedule loads.
    for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
      unsigned Base = LdBases[i];
      SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
      if (Lds.size() > 1)
        RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
    }

    // Re-schedule stores.
    for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
      unsigned Base = StBases[i];
      SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
      if (Sts.size() > 1)
        RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
    }

    if (MBBI != E) {
      Base2LdsMap.clear();
      Base2StsMap.clear();
      LdBases.clear();
      StBases.clear();
    }
  }

  return RetVal;
}

/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
/// optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
  if (PreAlloc)
    return new ARMPreAllocLoadStoreOpt();
  return new ARMLoadStoreOpt();
}
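
// A sketch of how a target pipeline might register both flavors (mirroring
// what ARMTargetMachine does; the exact hook names vary across LLVM versions):
//   PM.add(createARMLoadStoreOptimizationPass(/*PreAlloc=*/true));  // pre-RA
//   PM.add(createARMLoadStoreOptimizationPass());                   // post-RA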