//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote constant offsets to the immediate by
// adjusting the base. It tries to use a base from the nearby instructions that
// allows it to have a 13-bit constant offset and then promotes the 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offsets, but are close enough to each other, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//
  62. #include "AMDGPU.h"
  63. #include "AMDGPUSubtarget.h"
  64. #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
  65. #include "SIInstrInfo.h"
  66. #include "SIRegisterInfo.h"
  67. #include "Utils/AMDGPUBaseInfo.h"
  68. #include "llvm/ADT/ArrayRef.h"
  69. #include "llvm/ADT/SmallVector.h"
  70. #include "llvm/ADT/StringRef.h"
  71. #include "llvm/Analysis/AliasAnalysis.h"
  72. #include "llvm/CodeGen/MachineBasicBlock.h"
  73. #include "llvm/CodeGen/MachineFunction.h"
  74. #include "llvm/CodeGen/MachineFunctionPass.h"
  75. #include "llvm/CodeGen/MachineInstr.h"
  76. #include "llvm/CodeGen/MachineInstrBuilder.h"
  77. #include "llvm/CodeGen/MachineOperand.h"
  78. #include "llvm/CodeGen/MachineRegisterInfo.h"
  79. #include "llvm/IR/DebugLoc.h"
  80. #include "llvm/Pass.h"
  81. #include "llvm/Support/Debug.h"
  82. #include "llvm/Support/MathExtras.h"
  83. #include "llvm/Support/raw_ostream.h"
  84. #include <algorithm>
  85. #include <cassert>
  86. #include <cstdlib>
  87. #include <iterator>
  88. #include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
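
// The mergeable instruction classes. The buffer classes reuse the opcode value
// of their single-dword variant, so getNewOpcode() can pass the class value
// directly to AMDGPU::getMUBUFOpcode() as the base opcode.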
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
};
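
// Bitmask of the address operands an opcode carries; getRegs() computes it
// and findMatchingInst() maps each bit back to the named machine operand.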
enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
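  // Everything known about a candidate pair: the two instructions, their
  // decoded offsets, widths, and cache bits, plus the instructions that must
  // be moved below the merged instruction.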
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;
    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
  unsigned getOpcodeWidth(const MachineInstr &MI);
  InstClassEnum getInstClass(unsigned Opc);
  unsigned getRegs(unsigned Opc);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &MI, unsigned NewBase,
                           int32_t NewOffset);
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
  Optional<int32_t> extractConstOffset(const MachineOperand &Op);
  void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  /// Promotes constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &MI, MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &AnchorList);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}
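
// Re-insert each instruction in InstsToMove immediately after I, preserving
// their relative order.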
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}
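
// Worked example (hypothetical values, DS_READ_B32, EltSize = 4): byte offsets
// 1024 and 1032 become element offsets 256 and 258. Neither fits in 8 bits,
// and 258 is not a multiple of 64, so the plain and ST64 encodings both fail;
// the rebasing path below picks BaseOff = 1024, which leaves element offsets
// 0 and 2.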
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64 == 0) &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}
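
// A merged access covers Width0 + Width1 dwords and must map onto an existing
// wide opcode: x2/x4 for SMEM, and up to x4 for MUBUF (x3 only on subtargets
// with dwordx3 loads and stores).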
bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();

  if (TII->isMUBUF(MI)) {
    return AMDGPU::getMUBUFDwords(Opc);
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  }
}
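
// Map an opcode to its mergeable class. MUBUF opcodes are first normalized to
// their base opcode so every addressing variant lands in the same class.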
InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);

    // If we couldn't identify the opcode, bail out.
    if (baseOpcode == -1) {
      return UNKNOWN;
    }

    switch (baseOpcode) {
    default:
      return UNKNOWN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      return BUFFER_LOAD_OFFEN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      return BUFFER_LOAD_OFFSET;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      return BUFFER_STORE_OFFEN;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      return BUFFER_STORE_OFFSET;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      return BUFFER_LOAD_OFFEN_exact;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
      return BUFFER_LOAD_OFFSET_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      return BUFFER_STORE_OFFEN_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
      return BUFFER_STORE_OFFSET_exact;
    }
  }

  switch (Opc) {
  default:
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}
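
// Scan forward from CI.I for a second instruction of the same class whose
// address operands match, recording in CI.InstsToMove anything that must be
// hoisted below the eventual merged instruction. On success, CI.Paired and
// the decoded offsets and cache flags are filled in.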
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc);

  if (InstClass == UNKNOWN) {
    return false;
  }

  const unsigned Regs = getRegs(Opc);

  unsigned AddrOpName[5] = {0};
  int AddrIdx[5];
  const MachineOperand *AddrReg[5];
  unsigned NumAddresses = 0;

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {
    const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);

    if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
        (IsDS && (MBBI->getOpcode() != Opc))) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx =
          AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Width0 = getOpcodeWidth(*CI.I);
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Width1 = getOpcodeWidth(*MBBI);
      CI.Paired = MBBI;

      if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}
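
// Emit the fused read2 into a fresh super-register, then copy each half back
// to the pair's original destinations, roughly (MIR-style sketch):
//   %dst:vreg_64 = DS_READ2_B32 %addr, offset0, offset1
//   %dest0 = COPY %dst.sub0
//   %dest1 = COPY %dst.sub1
// If the offsets were rebased (CI.BaseOff != 0), an add is emitted first to
// form the new base address.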
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg);
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg);
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Copy to the new source register.
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;

  switch (CI.InstClass) {
  default:
    return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  }
}
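
// Pick the sub-register indices that place the lower-offset instruction's
// dwords first in the merged register. E.g. Width0 = 1, Width1 = 2 with
// Offset0 < Offset1 yields (sub0, sub1_sub2).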
std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
  if (CI.Offset0 > CI.Offset1) {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
      case 3:
        return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
      }
    }
  } else {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
      case 2:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
      case 3:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
      case 2:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
      }
    }
  }
}

const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}
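
// Materialize Val as an inline-constant immediate operand when possible,
// otherwise move it into a fresh SGPR with S_MOV_B32 and return that register.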
MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute base address using Addr and return the final register.
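// The emitted sequence is a 64-bit add split across the 32-bit halves:
//   %lo, %carry = V_ADD_I32_e64 Addr.Base.LoReg, OffsetLo
//   %hi = V_ADDC_U32_e64 Addr.Base.HiReg, OffsetHi, %carry
//   %base:vreg_64 = REG_SEQUENCE %lo, sub0, %hi, sub1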
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo =
      createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
  unsigned CarryReg =
      MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
  unsigned DeadCarryReg =
      MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);

  unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo);
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill);
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););

  unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}

// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) {
  TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

// Analyze Base and extract:
//  - 32-bit base registers and subregisters
//  - 64-bit constant offset
// Expecting base computation as:
//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
//   %LO:vgpr_32, %c:sreg_64_xexec =
//       V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
//   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//   %Base:vreg_64 =
//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE ||
      Def->getNumOperands() != 5)
    return;

  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}

bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) {
  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
    return false;

  // TODO: Support stores.
  if (!MI.mayLoad())
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
    return false;
  }

  // Step 1: Find the base registers and a 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
                    << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset
                    << "\n\n";);

  // Step 2: Traverse through MI's basic block and find an anchor (one with
  // the same base registers) with the highest 13-bit distance from MI's
  // offset. E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;   load1 = load(addr1,  0)
  //   addr2 = &a + 6144;   load2 = load(addr2,  0)
  //   addr3 = &a + 8192;   load3 = load(addr3,  0)
  //   addr4 = &a + 10240;  load4 = load(addr4,  0)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  // Starting from the first load, the optimization will try to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic considers &a + 8192
  // as the new base (anchor) because of the maximum distance, which can
  // presumably accommodate more intermediate bases.
  //
  // Step 3: move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2, load4.
  //   addr = &a + 8192
  //   load1 = load(addr, -4096)
  //   load2 = load(addr, -2048)
  //   load3 = load(addr, 0)
  //   load4 = load(addr, 2048)
  //   addr5 = &a + 12288;  load5 = load(addr5, 0)

  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
      static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for (; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
        *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
                      << AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute anchor-instruction's base address.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Caches the base address and constant offset computed for each visited
  // instruction, so promoteConstantOffsetToImm() analyzes each one only once.
  MemInfoMap Visited;
  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    const unsigned Opc = MI.getOpcode();

    CombineInfo CI;
    CI.I = I;
    CI.InstClass = getInstClass(Opc);

    switch (CI.InstClass) {
    default:
      break;
    case DS_READ:
      CI.EltSize =
          (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                          : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case DS_WRITE:
      CI.EltSize =
          (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                            : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case S_BUFFER_LOAD_IMM:
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
      } else {
        ++I;
      }
      continue;
    case BUFFER_LOAD_OFFEN:
    case BUFFER_LOAD_OFFSET:
    case BUFFER_LOAD_OFFEN_exact:
    case BUFFER_LOAD_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    case BUFFER_STORE_OFFEN:
    case BUFFER_STORE_OFFSET:
    case BUFFER_STORE_OFFEN_exact:
    case BUFFER_STORE_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
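    // Merging can create wider accesses that are themselves mergeable (e.g.
    // two x2 s_buffer loads into an x4), so repeat until a fixed point.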
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MBB);
    } while (OptimizeAgain);
  }

  return Modified;
}