
//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with close by immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offsets, but are close enough to fit in 8 bits, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
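  // Bookkeeping for a candidate pair of DS instructions: the two instructions
  // to merge, their element size and decoded offsets, an optional new base
  // offset, whether the ST64 variants should be used, and any intervening
  // instructions that must be moved below the merged instruction.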
  typedef struct {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr*, 8> InstsToMove;
  } CombineInfo;

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}
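
// Re-insert every instruction in InstsToMove, in order, immediately after I.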
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}
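
// Collect the operands defined by MI so that later instructions can be
// checked for dependences on them.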
static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}
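
// Return true if the memory accesses A and B may safely exchange order, either
// because alias analysis proves them trivially disjoint or because neither of
// them writes memory.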
static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis * AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
          // RAW or WAR - cannot reorder
          // WAW - cannot reorder
          // RAR - safe to reorder
          !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}
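
// Return true if every instruction in InstsToMove can be moved to the other
// side of MemOp without changing memory ordering; only the instructions that
// themselves access memory need to be checked.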
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}
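
// Decide whether the two offsets in CI can be encoded in a single
// read2/write2 instruction, updating Offset0/Offset1, UseST64 and BaseOff
// with the chosen encoding.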
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}
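
// Scan forward from CI.I for another DS instruction with the same opcode and
// base address whose offset can be combined with CI.I's, recording in
// CI.InstsToMove any intervening instructions that must be shifted below the
// merged instruction. Returns true and fills in CI.Paired on success.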
bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will need
      // to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur
    // with vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}
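
// Replace CI.I and CI.Paired with a single DS_READ2* (or DS_READ2ST64*)
// instruction, copy the two halves of the new result back to the original
// destination registers, and relocate any dependent instructions below the
// last copy. Returns an iterator to the instruction following the original
// CI.I.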
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                          .add(*Dest1)
                          .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}
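
// Replace CI.I and CI.Paired with a single DS_WRITE2* (or DS_WRITE2ST64*)
// instruction that stores both data operands, relocating any dependent
// instructions below it. Returns an iterator to the instruction following the
// original CI.I.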
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}
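
// Entry point: bail out on functions that opt out of optimization or on
// subtargets where the DS load/store optimization is disabled, then run
// optimizeBlock over every basic block in the function.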
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}