  1. //===- SILoadStoreOptimizer.cpp -------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass tries to fuse DS instructions whose immediate offsets are close together.
  10. // This will fuse operations such as
  11. // ds_read_b32 v0, v2 offset:16
  12. // ds_read_b32 v1, v2 offset:32
  13. // ==>
  14. // ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
  15. //
  16. // The same is done for certain SMEM and VMEM opcodes, e.g.:
  17. // s_buffer_load_dword s4, s[0:3], 4
  18. // s_buffer_load_dword s5, s[0:3], 8
  19. // ==>
  20. // s_buffer_load_dwordx2 s[4:5], s[0:3], 4
  21. //
  22. // This pass also tries to promote a constant offset to the immediate by
  23. // adjusting the base. It tries to use a base from the nearby instructions that
  24. // allows it to have a 13-bit constant offset and then promotes that 13-bit
  25. // offset to the immediate.
  26. // E.g.
  27. // s_movk_i32 s0, 0x1800
  28. // v_add_co_u32_e32 v0, vcc, s0, v2
  29. // v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
  30. //
  31. // s_movk_i32 s0, 0x1000
  32. // v_add_co_u32_e32 v5, vcc, s0, v2
  33. // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
  34. // global_load_dwordx2 v[5:6], v[5:6], off
  35. // global_load_dwordx2 v[0:1], v[0:1], off
  36. // =>
  37. // s_movk_i32 s0, 0x1000
  38. // v_add_co_u32_e32 v5, vcc, s0, v2
  39. // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
  40. // global_load_dwordx2 v[5:6], v[5:6], off
  41. // global_load_dwordx2 v[0:1], v[5:6], off offset:2048
  42. //
  43. // Future improvements:
  44. //
  45. // - This currently relies on the scheduler to place loads and stores next to
  46. // each other, and then only merges adjacent pairs of instructions. It would
  47. // be good to be more flexible with interleaved instructions, and possibly run
  48. // before scheduling. It currently misses stores of constants because loading
  49. // the constant into the data register is placed between the stores, although
  50. // this is arguably a scheduling problem.
  51. //
  52. // - Live interval recomputing seems inefficient. This currently only matches
  53. // one pair, and recomputes live intervals and moves on to the next pair. It
  54. // would be better to compute a list of all merges that need to occur.
  55. //
  56. // - With a list of instructions to process, we can also merge more. If a
  57. // cluster of loads has offsets that are too large to fit in the 8-bit
  58. // offset fields, but are close enough together to fit in 8 bits, we can add
  59. // to the base pointer and use the new, reduced offsets.
  60. //
  61. //===----------------------------------------------------------------------===//
  62. #include "AMDGPU.h"
  63. #include "AMDGPUSubtarget.h"
  64. #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
  65. #include "SIInstrInfo.h"
  66. #include "SIRegisterInfo.h"
  67. #include "Utils/AMDGPUBaseInfo.h"
  68. #include "llvm/ADT/ArrayRef.h"
  69. #include "llvm/ADT/SmallVector.h"
  70. #include "llvm/ADT/StringRef.h"
  71. #include "llvm/Analysis/AliasAnalysis.h"
  72. #include "llvm/CodeGen/MachineBasicBlock.h"
  73. #include "llvm/CodeGen/MachineFunction.h"
  74. #include "llvm/CodeGen/MachineFunctionPass.h"
  75. #include "llvm/CodeGen/MachineInstr.h"
  76. #include "llvm/CodeGen/MachineInstrBuilder.h"
  77. #include "llvm/CodeGen/MachineOperand.h"
  78. #include "llvm/CodeGen/MachineRegisterInfo.h"
  79. #include "llvm/IR/DebugLoc.h"
  80. #include "llvm/Pass.h"
  81. #include "llvm/Support/Debug.h"
  82. #include "llvm/Support/MathExtras.h"
  83. #include "llvm/Support/raw_ostream.h"
  84. #include <algorithm>
  85. #include <cassert>
  86. #include <cstdlib>
  87. #include <iterator>
  88. #include <utility>
  89. using namespace llvm;
  90. #define DEBUG_TYPE "si-load-store-opt"
  91. namespace {
  92. enum InstClassEnum {
  93. UNKNOWN,
  94. DS_READ,
  95. DS_WRITE,
  96. S_BUFFER_LOAD_IMM,
  97. BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  98. BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  99. BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  100. BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  101. BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  102. BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  103. BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  104. BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
  105. };
  106. enum RegisterEnum {
  107. SBASE = 0x1,
  108. SRSRC = 0x2,
  109. SOFFSET = 0x4,
  110. VADDR = 0x8,
  111. ADDR = 0x10,
  112. };
  113. class SILoadStoreOptimizer : public MachineFunctionPass {
  114. struct CombineInfo {
  115. MachineBasicBlock::iterator I;
  116. MachineBasicBlock::iterator Paired;
  117. unsigned EltSize;
  118. unsigned Offset0;
  119. unsigned Offset1;
  120. unsigned Width0;
  121. unsigned Width1;
  122. unsigned BaseOff;
  123. InstClassEnum InstClass;
  124. bool GLC0;
  125. bool GLC1;
  126. bool SLC0;
  127. bool SLC1;
  128. bool DLC0;
  129. bool DLC1;
  130. bool UseST64;
  131. SmallVector<MachineInstr *, 8> InstsToMove;
  132. };
  133. struct BaseRegisters {
  134. unsigned LoReg = 0;
  135. unsigned HiReg = 0;
  136. unsigned LoSubReg = 0;
  137. unsigned HiSubReg = 0;
  138. };
  139. struct MemAddress {
  140. BaseRegisters Base;
  141. int64_t Offset = 0;
  142. };
  143. using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
  144. private:
  145. const GCNSubtarget *STM = nullptr;
  146. const SIInstrInfo *TII = nullptr;
  147. const SIRegisterInfo *TRI = nullptr;
  148. MachineRegisterInfo *MRI = nullptr;
  149. AliasAnalysis *AA = nullptr;
  150. bool OptimizeAgain;
  151. static bool offsetsCanBeCombined(CombineInfo &CI);
  152. static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  153. static unsigned getNewOpcode(const CombineInfo &CI);
  154. static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  155. const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
  156. unsigned getOpcodeWidth(const MachineInstr &MI);
  157. InstClassEnum getInstClass(unsigned Opc);
  158. unsigned getRegs(unsigned Opc);
  159. bool findMatchingInst(CombineInfo &CI);
  160. unsigned read2Opcode(unsigned EltSize) const;
  161. unsigned read2ST64Opcode(unsigned EltSize) const;
  162. MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);
  163. unsigned write2Opcode(unsigned EltSize) const;
  164. unsigned write2ST64Opcode(unsigned EltSize) const;
  165. MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  166. MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  167. MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  168. MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);
  169. void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
  170. int32_t NewOffset);
  171. unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
  172. MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
  173. Optional<int32_t> extractConstOffset(const MachineOperand &Op);
  174. void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  175. /// Promotes a constant offset to the immediate by adjusting the base. It
  176. /// tries to use a base from the nearby instructions that allows it to have
  177. /// a 13-bit constant offset which gets promoted to the immediate.
  178. bool promoteConstantOffsetToImm(MachineInstr &CI,
  179. MemInfoMap &Visited,
  180. SmallPtrSet<MachineInstr *, 4> &Promoted);
  181. public:
  182. static char ID;
  183. SILoadStoreOptimizer() : MachineFunctionPass(ID) {
  184. initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  185. }
  186. bool optimizeBlock(MachineBasicBlock &MBB);
  187. bool runOnMachineFunction(MachineFunction &MF) override;
  188. StringRef getPassName() const override { return "SI Load Store Optimizer"; }
  189. void getAnalysisUsage(AnalysisUsage &AU) const override {
  190. AU.setPreservesCFG();
  191. AU.addRequired<AAResultsWrapperPass>();
  192. MachineFunctionPass::getAnalysisUsage(AU);
  193. }
  194. };
  195. } // end anonymous namespace.
  196. INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
  197. "SI Load Store Optimizer", false, false)
  198. INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  199. INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
  200. false, false)
  201. char SILoadStoreOptimizer::ID = 0;
  202. char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
  203. FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  204. return new SILoadStoreOptimizer();
  205. }
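// Move each instruction in InstsToMove to immediately after I, preserving
// their relative order.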
  206. static void moveInstsAfter(MachineBasicBlock::iterator I,
  207. ArrayRef<MachineInstr *> InstsToMove) {
  208. MachineBasicBlock *MBB = I->getParent();
  209. ++I;
  210. for (MachineInstr *MI : InstsToMove) {
  211. MI->removeFromParent();
  212. MBB->insert(I, MI);
  213. }
  214. }
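// Record the registers defined by MI, plus any physical registers it reads,
// so that later instructions can be checked for dependences on MI.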
  215. static void addDefsUsesToList(const MachineInstr &MI,
  216. DenseSet<unsigned> &RegDefs,
  217. DenseSet<unsigned> &PhysRegUses) {
  218. for (const MachineOperand &Op : MI.operands()) {
  219. if (Op.isReg()) {
  220. if (Op.isDef())
  221. RegDefs.insert(Op.getReg());
  222. else if (Op.readsReg() &&
  223. TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
  224. PhysRegUses.insert(Op.getReg());
  225. }
  226. }
  227. }
  228. static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
  229. MachineBasicBlock::iterator B,
  230. AliasAnalysis *AA) {
  231. // RAW or WAR - cannot reorder
  232. // WAW - cannot reorder
  233. // RAR - safe to reorder
  234. return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
  235. }
  236. // Add MI and its defs to the lists if MI reads one of the defs that are
  237. // already in the list. Returns true in that case.
  238. static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
  239. DenseSet<unsigned> &PhysRegUses,
  240. SmallVectorImpl<MachineInstr *> &Insts) {
  241. for (MachineOperand &Use : MI.operands()) {
  242. // If one of the defs is read, then there is a use of Def between I and the
  243. // instruction that I will potentially be merged with. We will need to move
  244. // this instruction after the merged instructions.
  245. //
  246. // Similarly, if there is a def which is read by an instruction that is to
  247. // be moved for merging, then we need to move the def-instruction as well.
  248. // This can only happen for physical registers such as M0; virtual
  249. // registers are in SSA form.
  250. if (Use.isReg() &&
  251. ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
  252. (Use.isDef() && RegDefs.count(Use.getReg())) ||
  253. (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
  254. PhysRegUses.count(Use.getReg())))) {
  255. Insts.push_back(&MI);
  256. addDefsUsesToList(MI, RegDefs, PhysRegUses);
  257. return true;
  258. }
  259. }
  260. return false;
  261. }
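// Returns true if every memory access in InstsToMove can be reordered with
// MemOp, i.e. it is safe to move all of them past MemOp.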
  262. static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
  263. ArrayRef<MachineInstr *> InstsToMove,
  264. AliasAnalysis *AA) {
  265. assert(MemOp.mayLoadOrStore());
  266. for (MachineInstr *InstToMove : InstsToMove) {
  267. if (!InstToMove->mayLoadOrStore())
  268. continue;
  269. if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
  270. return false;
  271. }
  272. return true;
  273. }
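// Returns true if the two offsets in CI can be encoded by a single merged
// instruction. For DS instructions this also rewrites CI.Offset0/Offset1,
// CI.BaseOff and CI.UseST64 to the values the merged encoding will use.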
  274. bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  275. // XXX - Would the same offset be OK? Is there any reason this would happen or
  276. // be useful?
  277. if (CI.Offset0 == CI.Offset1)
  278. return false;
  279. // This won't be valid if the offset isn't aligned.
  280. if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
  281. return false;
  282. unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  283. unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  284. CI.UseST64 = false;
  285. CI.BaseOff = 0;
  286. // Handle SMEM and VMEM instructions.
  287. if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
  288. return (EltOffset0 + CI.Width0 == EltOffset1 ||
  289. EltOffset1 + CI.Width1 == EltOffset0) &&
  290. CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
  291. (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  292. }
  293. // If the offset in elements doesn't fit in 8-bits, we might be able to use
  294. // the stride 64 versions.
  295. if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
  296. isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
  297. CI.Offset0 = EltOffset0 / 64;
  298. CI.Offset1 = EltOffset1 / 64;
  299. CI.UseST64 = true;
  300. return true;
  301. }
  302. // Check if the new offsets fit in the reduced 8-bit range.
  303. if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
  304. CI.Offset0 = EltOffset0;
  305. CI.Offset1 = EltOffset1;
  306. return true;
  307. }
  308. // Try to shift base address to decrease offsets.
  309. unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  310. CI.BaseOff = std::min(CI.Offset0, CI.Offset1);
  311. if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
  312. CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
  313. CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
  314. CI.UseST64 = true;
  315. return true;
  316. }
  317. if (isUInt<8>(OffsetDiff)) {
  318. CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
  319. CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
  320. return true;
  321. }
  322. return false;
  323. }
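// Returns true if a merged instruction exists for the combined width of the
// pair on this subtarget.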
  324. bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
  325. const CombineInfo &CI) {
  326. const unsigned Width = (CI.Width0 + CI.Width1);
  327. switch (CI.InstClass) {
  328. default:
  329. return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  330. case S_BUFFER_LOAD_IMM:
  331. switch (Width) {
  332. default:
  333. return false;
  334. case 2:
  335. case 4:
  336. return true;
  337. }
  338. }
  339. }
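// Returns the data width of MI in dwords, or 0 for unhandled opcodes.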
  340. unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) {
  341. const unsigned Opc = MI.getOpcode();
  342. if (TII->isMUBUF(MI)) {
  343. return AMDGPU::getMUBUFDwords(Opc);
  344. }
  345. switch (Opc) {
  346. default:
  347. return 0;
  348. case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  349. return 1;
  350. case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  351. return 2;
  352. case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
  353. return 4;
  354. }
  355. }
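// Map an opcode onto the instruction class this pass knows how to merge.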
  356. InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) {
  357. if (TII->isMUBUF(Opc)) {
  358. const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);
  359. // If we couldn't identify the opcode, bail out.
  360. if (baseOpcode == -1) {
  361. return UNKNOWN;
  362. }
  363. switch (baseOpcode) {
  364. default:
  365. return UNKNOWN;
  366. case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
  367. return BUFFER_LOAD_OFFEN;
  368. case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
  369. return BUFFER_LOAD_OFFSET;
  370. case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
  371. return BUFFER_STORE_OFFEN;
  372. case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
  373. return BUFFER_STORE_OFFSET;
  374. case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
  375. return BUFFER_LOAD_OFFEN_exact;
  376. case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
  377. return BUFFER_LOAD_OFFSET_exact;
  378. case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
  379. return BUFFER_STORE_OFFEN_exact;
  380. case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
  381. return BUFFER_STORE_OFFSET_exact;
  382. }
  383. }
  384. switch (Opc) {
  385. default:
  386. return UNKNOWN;
  387. case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  388. case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  389. case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
  390. return S_BUFFER_LOAD_IMM;
  391. case AMDGPU::DS_READ_B32:
  392. case AMDGPU::DS_READ_B64:
  393. case AMDGPU::DS_READ_B32_gfx9:
  394. case AMDGPU::DS_READ_B64_gfx9:
  395. return DS_READ;
  396. case AMDGPU::DS_WRITE_B32:
  397. case AMDGPU::DS_WRITE_B64:
  398. case AMDGPU::DS_WRITE_B32_gfx9:
  399. case AMDGPU::DS_WRITE_B64_gfx9:
  400. return DS_WRITE;
  401. }
  402. }
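// Returns a RegisterEnum bitmask of the address operands present on Opc.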
  403. unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) {
  404. if (TII->isMUBUF(Opc)) {
  405. unsigned result = 0;
  406. if (AMDGPU::getMUBUFHasVAddr(Opc)) {
  407. result |= VADDR;
  408. }
  409. if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
  410. result |= SRSRC;
  411. }
  412. if (AMDGPU::getMUBUFHasSoffset(Opc)) {
  413. result |= SOFFSET;
  414. }
  415. return result;
  416. }
  417. switch (Opc) {
  418. default:
  419. return 0;
  420. case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  421. case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  422. case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
  423. return SBASE;
  424. case AMDGPU::DS_READ_B32:
  425. case AMDGPU::DS_READ_B64:
  426. case AMDGPU::DS_READ_B32_gfx9:
  427. case AMDGPU::DS_READ_B64_gfx9:
  428. case AMDGPU::DS_WRITE_B32:
  429. case AMDGPU::DS_WRITE_B64:
  430. case AMDGPU::DS_WRITE_B32_gfx9:
  431. case AMDGPU::DS_WRITE_B64_gfx9:
  432. return ADDR;
  433. }
  434. }
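// Scan forward from CI.I for an instruction it can be merged with: same
// instruction class and base address, and combinable offsets. Intervening
// instructions that must be moved below the merge point are collected in
// CI.InstsToMove.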
  435. bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  436. MachineBasicBlock *MBB = CI.I->getParent();
  437. MachineBasicBlock::iterator E = MBB->end();
  438. MachineBasicBlock::iterator MBBI = CI.I;
  439. const unsigned Opc = CI.I->getOpcode();
  440. const InstClassEnum InstClass = getInstClass(Opc);
  441. if (InstClass == UNKNOWN) {
  442. return false;
  443. }
  444. const unsigned Regs = getRegs(Opc);
  445. unsigned AddrOpName[5] = {0};
  446. int AddrIdx[5];
  447. const MachineOperand *AddrReg[5];
  448. unsigned NumAddresses = 0;
  449. if (Regs & ADDR) {
  450. AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  451. }
  452. if (Regs & SBASE) {
  453. AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  454. }
  455. if (Regs & SRSRC) {
  456. AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  457. }
  458. if (Regs & SOFFSET) {
  459. AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  460. }
  461. if (Regs & VADDR) {
  462. AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  463. }
  464. for (unsigned i = 0; i < NumAddresses; i++) {
  465. AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
  466. AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);
  467. // We only ever merge operations with the same base address register, so
  468. // don't bother scanning forward if there are no other uses.
  469. if (AddrReg[i]->isReg() &&
  470. (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
  471. MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
  472. return false;
  473. }
  474. ++MBBI;
  475. DenseSet<unsigned> RegDefsToMove;
  476. DenseSet<unsigned> PhysRegUsesToMove;
  477. addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
  478. for (; MBBI != E; ++MBBI) {
  479. const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);
  480. if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
  481. (IsDS && (MBBI->getOpcode() != Opc))) {
  482. // This is not a matching DS instruction, but we can keep looking as
  483. // long as one of these conditions is met:
  484. // 1. It is safe to move I down past MBBI.
  485. // 2. It is safe to move MBBI down past the instruction that I will
  486. // be merged into.
  487. if (MBBI->hasUnmodeledSideEffects()) {
  488. // We can't re-order this instruction with respect to other memory
  489. // operations, so we fail both conditions mentioned above.
  490. return false;
  491. }
  492. if (MBBI->mayLoadOrStore() &&
  493. (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
  494. !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
  495. // We fail condition #1, but we may still be able to satisfy condition
  496. // #2. Add this instruction to the move list and then we will check
  497. // if condition #2 holds once we have selected the matching instruction.
  498. CI.InstsToMove.push_back(&*MBBI);
  499. addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
  500. continue;
  501. }
  502. // When we match I with another DS instruction we will be moving I down
  503. // to the location of the matched instruction, so any uses of I will need
  504. // to be moved down as well.
  505. addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
  506. CI.InstsToMove);
  507. continue;
  508. }
  509. // Don't merge volatiles.
  510. if (MBBI->hasOrderedMemoryRef())
  511. return false;
  512. // Handle a case like
  513. // DS_WRITE_B32 addr, v, idx0
  514. // w = DS_READ_B32 addr, idx0
  515. // DS_WRITE_B32 addr, f(w), idx1
  516. // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
  517. // merging of the two writes.
  518. if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
  519. CI.InstsToMove))
  520. continue;
  521. bool Match = true;
  522. for (unsigned i = 0; i < NumAddresses; i++) {
  523. const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);
  524. if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
  525. if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
  526. AddrReg[i]->getImm() != AddrRegNext.getImm()) {
  527. Match = false;
  528. break;
  529. }
  530. continue;
  531. }
  532. // Check same base pointer. Be careful of subregisters, which can occur
  533. // with vectors of pointers.
  534. if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
  535. AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
  536. Match = false;
  537. break;
  538. }
  539. }
  540. if (Match) {
  541. int OffsetIdx =
  542. AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
  543. CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
  544. CI.Width0 = getOpcodeWidth(*CI.I);
  545. CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
  546. CI.Width1 = getOpcodeWidth(*MBBI);
  547. CI.Paired = MBBI;
  548. if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
  549. CI.Offset0 &= 0xffff;
  550. CI.Offset1 &= 0xffff;
  551. } else {
  552. CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
  553. CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
  554. if (CI.InstClass != S_BUFFER_LOAD_IMM) {
  555. CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
  556. CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
  557. }
  558. CI.DLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::dlc)->getImm();
  559. CI.DLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::dlc)->getImm();
  560. }
  561. // Check that both offsets fit in the reduced range.
  562. // We also need to go through the list of instructions that we plan to
  563. // move and make sure they are all safe to move down past the merged
  564. // instruction.
  565. if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
  566. if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
  567. return true;
  568. }
  569. // We've found a load/store that we couldn't merge for some reason.
  570. // We could potentially keep looking, but we'd need to make sure that
  571. // it was safe to move I and also all the instructions in InstsToMove
  572. // down past this instruction.
  573. // Check if we can move I across MBBI and if we can move all of I's users.
  574. if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
  575. !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
  576. break;
  577. }
  578. return false;
  579. }
  580. unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  581. if (STM->ldsRequiresM0Init())
  582. return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  583. return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
  584. }
  585. unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  586. if (STM->ldsRequiresM0Init())
  587. return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
  588. return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
  589. : AMDGPU::DS_READ2ST64_B64_gfx9;
  590. }
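// Merge the pair in CI into a single ds_read2 / ds_read2st64 and copy the
// result subregisters back into the original destination registers.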
  591. MachineBasicBlock::iterator
  592. SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  593. MachineBasicBlock *MBB = CI.I->getParent();
  594. // Be careful, since the addresses could be subregisters themselves in weird
  595. // cases, like vectors of pointers.
  596. const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  597. const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  598. const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);
  599. unsigned NewOffset0 = CI.Offset0;
  600. unsigned NewOffset1 = CI.Offset1;
  601. unsigned Opc =
  602. CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);
  603. unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  604. unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
  605. if (NewOffset0 > NewOffset1) {
  606. // Canonicalize the merged instruction so the smaller offset comes first.
  607. std::swap(NewOffset0, NewOffset1);
  608. std::swap(SubRegIdx0, SubRegIdx1);
  609. }
  610. assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
  611. (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
  612. const MCInstrDesc &Read2Desc = TII->get(Opc);
  613. const TargetRegisterClass *SuperRC =
  614. (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  615. unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  616. DebugLoc DL = CI.I->getDebugLoc();
  617. unsigned BaseReg = AddrReg->getReg();
  618. unsigned BaseSubReg = AddrReg->getSubReg();
  619. unsigned BaseRegFlags = 0;
  620. if (CI.BaseOff) {
  621. unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  622. BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
  623. .addImm(CI.BaseOff);
  624. BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  625. BaseRegFlags = RegState::Kill;
  626. TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
  627. .addReg(ImmReg)
  628. .addReg(AddrReg->getReg(), 0, BaseSubReg)
  629. .addImm(0); // clamp bit
  630. BaseSubReg = 0;
  631. }
  632. MachineInstrBuilder Read2 =
  633. BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
  634. .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
  635. .addImm(NewOffset0) // offset0
  636. .addImm(NewOffset1) // offset1
  637. .addImm(0) // gds
  638. .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
  639. (void)Read2;
  640. const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  641. // Copy to the old destination registers.
  642. BuildMI(*MBB, CI.Paired, DL, CopyDesc)
  643. .add(*Dest0) // Copy to same destination including flags and sub reg.
  644. .addReg(DestReg, 0, SubRegIdx0);
  645. MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
  646. .add(*Dest1)
  647. .addReg(DestReg, RegState::Kill, SubRegIdx1);
  648. moveInstsAfter(Copy1, CI.InstsToMove);
  649. MachineBasicBlock::iterator Next = std::next(CI.I);
  650. CI.I->eraseFromParent();
  651. CI.Paired->eraseFromParent();
  652. LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  653. return Next;
  654. }
  655. unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  656. if (STM->ldsRequiresM0Init())
  657. return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  658. return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
  659. : AMDGPU::DS_WRITE2_B64_gfx9;
  660. }
  661. unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  662. if (STM->ldsRequiresM0Init())
  663. return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
  664. : AMDGPU::DS_WRITE2ST64_B64;
  665. return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
  666. : AMDGPU::DS_WRITE2ST64_B64_gfx9;
  667. }
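// Merge the pair in CI into a single ds_write2 / ds_write2st64.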
  668. MachineBasicBlock::iterator
  669. SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  670. MachineBasicBlock *MBB = CI.I->getParent();
  671. // Be sure to use .add() and not .addReg() with these. We want to be
  672. // sure we preserve the subregister index and any register flags set on them.
  673. const MachineOperand *AddrReg =
  674. TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  675. const MachineOperand *Data0 =
  676. TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  677. const MachineOperand *Data1 =
  678. TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);
  679. unsigned NewOffset0 = CI.Offset0;
  680. unsigned NewOffset1 = CI.Offset1;
  681. unsigned Opc =
  682. CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);
  683. if (NewOffset0 > NewOffset1) {
  684. // Canonicalize the merged instruction so the smaller offset comes first.
  685. std::swap(NewOffset0, NewOffset1);
  686. std::swap(Data0, Data1);
  687. }
  688. assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
  689. (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
  690. const MCInstrDesc &Write2Desc = TII->get(Opc);
  691. DebugLoc DL = CI.I->getDebugLoc();
  692. unsigned BaseReg = AddrReg->getReg();
  693. unsigned BaseSubReg = AddrReg->getSubReg();
  694. unsigned BaseRegFlags = 0;
  695. if (CI.BaseOff) {
  696. unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  697. BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
  698. .addImm(CI.BaseOff);
  699. BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  700. BaseRegFlags = RegState::Kill;
  701. TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
  702. .addReg(ImmReg)
  703. .addReg(AddrReg->getReg(), 0, BaseSubReg)
  704. .addImm(0); // clamp bit
  705. BaseSubReg = 0;
  706. }
  707. MachineInstrBuilder Write2 =
  708. BuildMI(*MBB, CI.Paired, DL, Write2Desc)
  709. .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
  710. .add(*Data0) // data0
  711. .add(*Data1) // data1
  712. .addImm(NewOffset0) // offset0
  713. .addImm(NewOffset1) // offset1
  714. .addImm(0) // gds
  715. .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
  716. moveInstsAfter(Write2, CI.InstsToMove);
  717. MachineBasicBlock::iterator Next = std::next(CI.I);
  718. CI.I->eraseFromParent();
  719. CI.Paired->eraseFromParent();
  720. LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  721. return Next;
  722. }
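// Merge the pair in CI into one wider s_buffer_load and copy the result
// subregisters back into the original destination registers.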
  723. MachineBasicBlock::iterator
  724. SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  725. MachineBasicBlock *MBB = CI.I->getParent();
  726. DebugLoc DL = CI.I->getDebugLoc();
  727. const unsigned Opcode = getNewOpcode(CI);
  728. const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  729. unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  730. unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
  731. BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
  732. .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
  733. .addImm(MergedOffset) // offset
  734. .addImm(CI.GLC0) // glc
  735. .addImm(CI.DLC0) // dlc
  736. .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
  737. std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  738. const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  739. const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
  740. // Copy to the old destination registers.
  741. const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  742. const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  743. const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);
  744. BuildMI(*MBB, CI.Paired, DL, CopyDesc)
  745. .add(*Dest0) // Copy to same destination including flags and sub reg.
  746. .addReg(DestReg, 0, SubRegIdx0);
  747. MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
  748. .add(*Dest1)
  749. .addReg(DestReg, RegState::Kill, SubRegIdx1);
  750. moveInstsAfter(Copy1, CI.InstsToMove);
  751. MachineBasicBlock::iterator Next = std::next(CI.I);
  752. CI.I->eraseFromParent();
  753. CI.Paired->eraseFromParent();
  754. return Next;
  755. }
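// Merge the pair in CI into one wider buffer load and copy the result
// subregisters back into the original destination registers.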
  756. MachineBasicBlock::iterator
  757. SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  758. MachineBasicBlock *MBB = CI.I->getParent();
  759. DebugLoc DL = CI.I->getDebugLoc();
  760. const unsigned Opcode = getNewOpcode(CI);
  761. const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  762. // Create the new, wider destination register.
  763. unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  764. unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
  765. auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);
  766. const unsigned Regs = getRegs(Opcode);
  767. if (Regs & VADDR)
  768. MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
  769. MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
  770. .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
  771. .addImm(MergedOffset) // offset
  772. .addImm(CI.GLC0) // glc
  773. .addImm(CI.SLC0) // slc
  774. .addImm(0) // tfe
  775. .addImm(CI.DLC0) // dlc
  776. .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
  777. std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  778. const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  779. const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
  780. // Copy to the old destination registers.
  781. const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  782. const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  783. const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
  784. BuildMI(*MBB, CI.Paired, DL, CopyDesc)
  785. .add(*Dest0) // Copy to same destination including flags and sub reg.
  786. .addReg(DestReg, 0, SubRegIdx0);
  787. MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
  788. .add(*Dest1)
  789. .addReg(DestReg, RegState::Kill, SubRegIdx1);
  790. moveInstsAfter(Copy1, CI.InstsToMove);
  791. MachineBasicBlock::iterator Next = std::next(CI.I);
  792. CI.I->eraseFromParent();
  793. CI.Paired->eraseFromParent();
  794. return Next;
  795. }
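// Returns the opcode of the merged instruction for the combined width.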
  796. unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  797. const unsigned Width = CI.Width0 + CI.Width1;
  798. switch (CI.InstClass) {
  799. default:
  800. return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  801. case UNKNOWN:
  802. llvm_unreachable("Unknown instruction class");
  803. case S_BUFFER_LOAD_IMM:
  804. switch (Width) {
  805. default:
  806. return 0;
  807. case 2:
  808. return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
  809. case 4:
  810. return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
  811. }
  812. }
  813. }
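// Returns the subregister indices that map the two original operands onto the
// merged register, ordered so the operand with the lower offset takes the
// lower subregisters.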
  814. std::pair<unsigned, unsigned>
  815. SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
  816. if (CI.Offset0 > CI.Offset1) {
  817. switch (CI.Width0) {
  818. default:
  819. return std::make_pair(0, 0);
  820. case 1:
  821. switch (CI.Width1) {
  822. default:
  823. return std::make_pair(0, 0);
  824. case 1:
  825. return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
  826. case 2:
  827. return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
  828. case 3:
  829. return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
  830. }
  831. case 2:
  832. switch (CI.Width1) {
  833. default:
  834. return std::make_pair(0, 0);
  835. case 1:
  836. return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
  837. case 2:
  838. return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
  839. }
  840. case 3:
  841. switch (CI.Width1) {
  842. default:
  843. return std::make_pair(0, 0);
  844. case 1:
  845. return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
  846. }
  847. }
  848. } else {
  849. switch (CI.Width0) {
  850. default:
  851. return std::make_pair(0, 0);
  852. case 1:
  853. switch (CI.Width1) {
  854. default:
  855. return std::make_pair(0, 0);
  856. case 1:
  857. return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
  858. case 2:
  859. return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
  860. case 3:
  861. return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
  862. }
  863. case 2:
  864. switch (CI.Width1) {
  865. default:
  866. return std::make_pair(0, 0);
  867. case 1:
  868. return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
  869. case 2:
  870. return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
  871. }
  872. case 3:
  873. switch (CI.Width1) {
  874. default:
  875. return std::make_pair(0, 0);
  876. case 1:
  877. return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
  878. }
  879. }
  880. }
  881. }
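// Returns a register class wide enough to hold the merged value.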
  882. const TargetRegisterClass *
  883. SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  884. if (CI.InstClass == S_BUFFER_LOAD_IMM) {
  885. switch (CI.Width0 + CI.Width1) {
  886. default:
  887. return nullptr;
  888. case 2:
  889. return &AMDGPU::SReg_64_XEXECRegClass;
  890. case 4:
  891. return &AMDGPU::SReg_128RegClass;
  892. case 8:
  893. return &AMDGPU::SReg_256RegClass;
  894. case 16:
  895. return &AMDGPU::SReg_512RegClass;
  896. }
  897. } else {
  898. switch (CI.Width0 + CI.Width1) {
  899. default:
  900. return nullptr;
  901. case 2:
  902. return &AMDGPU::VReg_64RegClass;
  903. case 3:
  904. return &AMDGPU::VReg_96RegClass;
  905. case 4:
  906. return &AMDGPU::VReg_128RegClass;
  907. }
  908. }
  909. }
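// Merge the pair in CI into one wider buffer store, assembling the merged
// data operand with a REG_SEQUENCE.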
  910. MachineBasicBlock::iterator
  911. SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  912. MachineBasicBlock *MBB = CI.I->getParent();
  913. DebugLoc DL = CI.I->getDebugLoc();
  914. const unsigned Opcode = getNewOpcode(CI);
  915. std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  916. const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  917. const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
  918. // Copy to the new source register.
  919. const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  920. unsigned SrcReg = MRI->createVirtualRegister(SuperRC);
  921. const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  922. const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
  923. BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
  924. .add(*Src0)
  925. .addImm(SubRegIdx0)
  926. .add(*Src1)
  927. .addImm(SubRegIdx1);
  928. auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
  929. .addReg(SrcReg, RegState::Kill);
  930. const unsigned Regs = getRegs(Opcode);
  931. if (Regs & VADDR)
  932. MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
  933. MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
  934. .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
  935. .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
  936. .addImm(CI.GLC0) // glc
  937. .addImm(CI.SLC0) // slc
  938. .addImm(0) // tfe
  939. .addImm(CI.DLC0) // dlc
  940. .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
  941. moveInstsAfter(MIB, CI.InstsToMove);
  942. MachineBasicBlock::iterator Next = std::next(CI.I);
  943. CI.I->eraseFromParent();
  944. CI.Paired->eraseFromParent();
  945. return Next;
  946. }
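// Return Val as an immediate operand if it is an inline constant; otherwise
// materialize it into an SGPR with S_MOV_B32 and return that register.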
  947. MachineOperand
  948. SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
  949. APInt V(32, Val, true);
  950. if (TII->isInlineConstant(V))
  951. return MachineOperand::CreateImm(Val);
  952. unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  953. MachineInstr *Mov =
  954. BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
  955. TII->get(AMDGPU::S_MOV_B32), Reg)
  956. .addImm(Val);
  957. (void)Mov;
  958. LLVM_DEBUG(dbgs() << " "; Mov->dump());
  959. return MachineOperand::CreateReg(Reg, false);
  960. }
  961. // Compute base address using Addr and return the final register.
  962. unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
  963. const MemAddress &Addr) {
  964. MachineBasicBlock *MBB = MI.getParent();
  965. MachineBasicBlock::iterator MBBI = MI.getIterator();
  966. DebugLoc DL = MI.getDebugLoc();
  967. assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
  968. Addr.Base.LoSubReg) &&
  969. "Expected 32-bit Base-Register-Low!!");
  970. assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
  971. Addr.Base.HiSubReg) &&
  972. "Expected 32-bit Base-Register-Hi!!");
  973. LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n");
  974. MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  975. MachineOperand OffsetHi =
  976. createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
  977. unsigned CarryReg = MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
  978. unsigned DeadCarryReg =
  979. MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
  980. unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  981. unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  982. MachineInstr *LoHalf =
  983. BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
  984. .addReg(CarryReg, RegState::Define)
  985. .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
  986. .add(OffsetLo)
  987. .addImm(0); // clamp bit
  988. (void)LoHalf;
  989. LLVM_DEBUG(dbgs() << " "; LoHalf->dump(););
  990. MachineInstr *HiHalf =
  991. BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
  992. .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
  993. .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
  994. .add(OffsetHi)
  995. .addReg(CarryReg, RegState::Kill)
  996. .addImm(0); // clamp bit
  997. (void)HiHalf;
  998. LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););
  999. unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  1000. MachineInstr *FullBase =
  1001. BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
  1002. .addReg(DestSub0)
  1003. .addImm(AMDGPU::sub0)
  1004. .addReg(DestSub1)
  1005. .addImm(AMDGPU::sub1);
  1006. (void)FullBase;
  1007. LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";);
  1008. return FullDestReg;
  1009. }
  1010. // Update the base register and offset of MI to NewBase and NewOffset.
  1011. void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
  1012. unsigned NewBase,
  1013. int32_t NewOffset) {
  1014. TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
  1015. TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
  1016. }
  1017. Optional<int32_t>
  1018. SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
  1019. if (Op.isImm())
  1020. return Op.getImm();
  1021. if (!Op.isReg())
  1022. return None;
  1023. MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  1024. if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
  1025. !Def->getOperand(1).isImm())
  1026. return None;
  1027. return Def->getOperand(1).getImm();
  1028. }
  1029. // Analyzes Base and extracts:
  1030. // - 32-bit base registers and subregisters
  1031. // - 64-bit constant offset
  1032. // Expecting base computation as:
  1033. // %OFFSET0:sgpr_32 = S_MOV_B32 8000
  1034. // %LO:vgpr_32, %c:sreg_64_xexec =
  1035. // V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
  1036. // %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
  1037. // %Base:vreg_64 =
  1038. // REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
  1039. void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
  1040. MemAddress &Addr) {
  1041. if (!Base.isReg())
  1042. return;
  1043. MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  1044. if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
  1045. || Def->getNumOperands() != 5)
  1046. return;
  1047. MachineOperand BaseLo = Def->getOperand(1);
  1048. MachineOperand BaseHi = Def->getOperand(3);
  1049. if (!BaseLo.isReg() || !BaseHi.isReg())
  1050. return;
  1051. MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  1052. MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
  1053. if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
  1054. !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
  1055. return;
  1056. const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  1057. const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
  1058. auto Offset0P = extractConstOffset(*Src0);
  1059. if (Offset0P)
  1060. BaseLo = *Src1;
  1061. else {
  1062. if (!(Offset0P = extractConstOffset(*Src1)))
  1063. return;
  1064. BaseLo = *Src0;
  1065. }
  1066. Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  1067. Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
  1068. if (Src0->isImm())
  1069. std::swap(Src0, Src1);
  1070. if (!Src1->isImm())
  1071. return;
  1072. uint64_t Offset1 = Src1->getImm();
  1073. BaseHi = *Src0;
  1074. Addr.Base.LoReg = BaseLo.getReg();
  1075. Addr.Base.HiReg = BaseHi.getReg();
  1076. Addr.Base.LoSubReg = BaseLo.getSubReg();
  1077. Addr.Base.HiSubReg = BaseHi.getSubReg();
  1078. Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
  1079. }
  1080. bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
  1081. MachineInstr &MI,
  1082. MemInfoMap &Visited,
  1083. SmallPtrSet<MachineInstr *, 4> &AnchorList) {
  1084. // TODO: Support flat and scratch.
  1085. if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
  1086. TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
  1087. return false;
  1088. // TODO: Support Store.
  1089. if (!MI.mayLoad())
  1090. return false;
  1091. if (AnchorList.count(&MI))
  1092. return false;
  1093. LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
  1094. if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
  1095. LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
  1096. return false;
  1097. }
  1098. // Step1: Find the base-registers and a 64-bit constant offset.
  1099. MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  1100. MemAddress MAddr;
  1101. if (Visited.find(&MI) == Visited.end()) {
  1102. processBaseWithConstOffset(Base, MAddr);
  1103. Visited[&MI] = MAddr;
  1104. } else
  1105. MAddr = Visited[&MI];
  1106. if (MAddr.Offset == 0) {
  1107. LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
  1108. " constant offsets that can be promoted.\n";);
  1109. return false;
  1110. }
  1111. LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
  1112. << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
  1113. // Step2: Traverse through MI's basic block and find an anchor (one that has
  1114. // the same base registers) with the highest 13-bit distance from MI's offset.
  1115. // E.g. (64bit loads)
  1116. // bb:
  1117. // addr1 = &a + 4096; load1 = load(addr1, 0)
  1118. // addr2 = &a + 6144; load2 = load(addr2, 0)
  1119. // addr3 = &a + 8192; load3 = load(addr3, 0)
  1120. // addr4 = &a + 10240; load4 = load(addr4, 0)
  1121. // addr5 = &a + 12288; load5 = load(addr5, 0)
  1122. //
  1123. // Starting from the first load, the optimization will try to find a new base
  1124. // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  1125. // have a 13-bit distance from &a + 4096. The heuristic picks &a + 8192
  1126. // as the new base (anchor) because of the maximum distance, which can
  1127. // presumably accommodate more intermediate bases.
  1128. //
  1129. // Step3: move (&a + 8192) above load1. Compute and promote offsets from
  1130. // (&a + 8192) for load1, load2, load4.
  1131. // addr = &a + 8192
  1132. // load1 = load(addr, -4096)
  1133. // load2 = load(addr, -2048)
  1134. // load3 = load(addr, 0)
  1135. // load4 = load(addr, 2048)
  1136. // addr5 = &a + 12288; load5 = load(addr5, 0)
  1137. //
  1138. MachineInstr *AnchorInst = nullptr;
  1139. MemAddress AnchorAddr;
  1140. uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  1141. SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;
  1142. MachineBasicBlock *MBB = MI.getParent();
  1143. MachineBasicBlock::iterator E = MBB->end();
  1144. MachineBasicBlock::iterator MBBI = MI.getIterator();
  1145. ++MBBI;
  1146. const SITargetLowering *TLI =
  1147. static_cast<const SITargetLowering *>(STM->getTargetLowering());
  1148. for ( ; MBBI != E; ++MBBI) {
  1149. MachineInstr &MINext = *MBBI;
  1150. // TODO: Support finding an anchor(with same base) from store addresses or
  1151. // any other load addresses where the opcodes are different.
  1152. if (MINext.getOpcode() != MI.getOpcode() ||
  1153. TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
  1154. continue;
  1155. const MachineOperand &BaseNext =
  1156. *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
  1157. MemAddress MAddrNext;
  1158. if (Visited.find(&MINext) == Visited.end()) {
  1159. processBaseWithConstOffset(BaseNext, MAddrNext);
  1160. Visited[&MINext] = MAddrNext;
  1161. } else
  1162. MAddrNext = Visited[&MINext];
  1163. if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
  1164. MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
  1165. MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
  1166. MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
  1167. continue;
  1168. InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
  1169. int64_t Dist = MAddr.Offset - MAddrNext.Offset;
  1170. TargetLoweringBase::AddrMode AM;
  1171. AM.HasBaseReg = true;
  1172. AM.BaseOffs = Dist;
  1173. if (TLI->isLegalGlobalAddressingMode(AM) &&
  1174. (uint32_t)std::abs(Dist) > MaxDist) {
  1175. MaxDist = std::abs(Dist);
  1176. AnchorAddr = MAddrNext;
  1177. AnchorInst = &MINext;
  1178. }
  1179. }
  1180. if (AnchorInst) {
  1181. LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): ";
  1182. AnchorInst->dump());
  1183. LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: "
  1184. << AnchorAddr.Offset << "\n\n");
  1185. // Instead of moving up, just re-compute anchor-instruction's base address.
  1186. unsigned Base = computeBase(MI, AnchorAddr);
  1187. updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
  1188. LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump(););
  1189. for (auto P : InstsWCommonBase) {
  1190. TargetLoweringBase::AddrMode AM;
  1191. AM.HasBaseReg = true;
  1192. AM.BaseOffs = P.second - AnchorAddr.Offset;
  1193. if (TLI->isLegalGlobalAddressingMode(AM)) {
  1194. LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second;
  1195. dbgs() << ")"; P.first->dump());
  1196. updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
  1197. LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump());
  1198. }
  1199. }
  1200. AnchorList.insert(AnchorInst);
  1201. return true;
  1202. }
  1203. return false;
  1204. }
  1205. // Scan through looking for adjacent LDS operations with constant offsets from
  1206. // the same base register. We rely on the scheduler to do the hard work of
  1207. // clustering nearby loads, and assume these are all adjacent.
  1208. bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  1209. bool Modified = false;
  1210. // Caches the base registers and constant offset extracted for each visited
  1211. // instruction.
  1211. MemInfoMap Visited;
  1212. // Contains the list of instructions for which constant offsets are being
  1213. // promoted to the IMM.
  1214. SmallPtrSet<MachineInstr *, 4> AnchorList;
  1215. for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
  1216. MachineInstr &MI = *I;
  1217. if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
  1218. Modified = true;
  1219. // Don't combine if volatile.
  1220. if (MI.hasOrderedMemoryRef()) {
  1221. ++I;
  1222. continue;
  1223. }
  1224. const unsigned Opc = MI.getOpcode();
  1225. CombineInfo CI;
  1226. CI.I = I;
  1227. CI.InstClass = getInstClass(Opc);
  1228. switch (CI.InstClass) {
  1229. default:
  1230. break;
  1231. case DS_READ:
  1232. CI.EltSize =
  1233. (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
  1234. : 4;
  1235. if (findMatchingInst(CI)) {
  1236. Modified = true;
  1237. I = mergeRead2Pair(CI);
  1238. } else {
  1239. ++I;
  1240. }
  1241. continue;
  1242. case DS_WRITE:
  1243. CI.EltSize =
  1244. (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
  1245. : 4;
  1246. if (findMatchingInst(CI)) {
  1247. Modified = true;
  1248. I = mergeWrite2Pair(CI);
  1249. } else {
  1250. ++I;
  1251. }
  1252. continue;
  1253. case S_BUFFER_LOAD_IMM:
  1254. CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
  1255. if (findMatchingInst(CI)) {
  1256. Modified = true;
  1257. I = mergeSBufferLoadImmPair(CI);
  1258. OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
  1259. } else {
  1260. ++I;
  1261. }
  1262. continue;
  1263. case BUFFER_LOAD_OFFEN:
  1264. case BUFFER_LOAD_OFFSET:
  1265. case BUFFER_LOAD_OFFEN_exact:
  1266. case BUFFER_LOAD_OFFSET_exact:
  1267. CI.EltSize = 4;
  1268. if (findMatchingInst(CI)) {
  1269. Modified = true;
  1270. I = mergeBufferLoadPair(CI);
  1271. OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
  1272. } else {
  1273. ++I;
  1274. }
  1275. continue;
  1276. case BUFFER_STORE_OFFEN:
  1277. case BUFFER_STORE_OFFSET:
  1278. case BUFFER_STORE_OFFEN_exact:
  1279. case BUFFER_STORE_OFFSET_exact:
  1280. CI.EltSize = 4;
  1281. if (findMatchingInst(CI)) {
  1282. Modified = true;
  1283. I = mergeBufferStorePair(CI);
  1284. OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
  1285. } else {
  1286. ++I;
  1287. }
  1288. continue;
  1289. }
  1290. ++I;
  1291. }
  1292. return Modified;
  1293. }
  1294. bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  1295. if (skipFunction(MF.getFunction()))
  1296. return false;
  1297. STM = &MF.getSubtarget<GCNSubtarget>();
  1298. if (!STM->loadStoreOptEnabled())
  1299. return false;
  1300. TII = STM->getInstrInfo();
  1301. TRI = &TII->getRegisterInfo();
  1302. MRI = &MF.getRegInfo();
  1303. AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  1304. assert(MRI->isSSA() && "Must be run on SSA");
  1305. LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
  1306. bool Modified = false;
  1307. for (MachineBasicBlock &MBB : MF) {
  1308. do {
  1309. OptimizeAgain = false;
  1310. Modified |= optimizeBlock(MBB);
  1311. } while (OptimizeAgain);
  1312. }
  1313. return Modified;
  1314. }