- //===- SILoadStoreOptimizer.cpp -------------------------------------------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This pass tries to fuse DS instructions with nearby immediate offsets.
- // This will fuse operations such as
- // ds_read_b32 v0, v2 offset:16
- // ds_read_b32 v1, v2 offset:32
- // ==>
- // ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
- //
- // The same is done for certain SMEM and VMEM opcodes, e.g.:
- // s_buffer_load_dword s4, s[0:3], 4
- // s_buffer_load_dword s5, s[0:3], 8
- // ==>
- // s_buffer_load_dwordx2 s[4:5], s[0:3], 4
- //
- // This pass also tries to promote constant offset to the immediate by
- // adjusting the base. It tries to use a base from the nearby instructions that
- // allows it to have a 13-bit constant offset and then promotes the 13-bit offset
- // to the immediate.
- // E.g.
- // s_movk_i32 s0, 0x1800
- // v_add_co_u32_e32 v0, vcc, s0, v2
- // v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
- //
- // s_movk_i32 s0, 0x1000
- // v_add_co_u32_e32 v5, vcc, s0, v2
- // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
- // global_load_dwordx2 v[5:6], v[5:6], off
- // global_load_dwordx2 v[0:1], v[0:1], off
- // =>
- // s_movk_i32 s0, 0x1000
- // v_add_co_u32_e32 v5, vcc, s0, v2
- // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
- // global_load_dwordx2 v[5:6], v[5:6], off
- // global_load_dwordx2 v[0:1], v[5:6], off offset:2048
- //
- // Future improvements:
- //
- // - This currently relies on the scheduler to place loads and stores next to
- // each other, and then only merges adjacent pairs of instructions. It would
- // be good to be more flexible with interleaved instructions, and possibly run
- // before scheduling. It currently misses stores of constants because loading
- // the constant into the data register is placed between the stores, although
- // this is arguably a scheduling problem.
- //
- // - Live interval recomputing seems inefficient. This currently only matches
- // one pair, and recomputes live intervals and moves on to the next pair. It
- // would be better to compute a list of all merges that need to occur.
- //
- // - With a list of instructions to process, we can also merge more. If a
- // cluster of loads has offsets too large to fit in the 8-bit offset fields,
- // but the loads lie close enough together that their relative offsets do
- // fit, we can add to the base pointer and use the new, reduced offsets.
- //
- //===----------------------------------------------------------------------===//
- #include "AMDGPU.h"
- #include "AMDGPUSubtarget.h"
- #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
- #include "SIInstrInfo.h"
- #include "SIRegisterInfo.h"
- #include "Utils/AMDGPUBaseInfo.h"
- #include "llvm/ADT/ArrayRef.h"
- #include "llvm/ADT/SmallVector.h"
- #include "llvm/ADT/StringRef.h"
- #include "llvm/Analysis/AliasAnalysis.h"
- #include "llvm/CodeGen/MachineBasicBlock.h"
- #include "llvm/CodeGen/MachineFunction.h"
- #include "llvm/CodeGen/MachineFunctionPass.h"
- #include "llvm/CodeGen/MachineInstr.h"
- #include "llvm/CodeGen/MachineInstrBuilder.h"
- #include "llvm/CodeGen/MachineOperand.h"
- #include "llvm/CodeGen/MachineRegisterInfo.h"
- #include "llvm/IR/DebugLoc.h"
- #include "llvm/Pass.h"
- #include "llvm/Support/Debug.h"
- #include "llvm/Support/MathExtras.h"
- #include "llvm/Support/raw_ostream.h"
- #include <algorithm>
- #include <cassert>
- #include <cstdlib>
- #include <iterator>
- #include <utility>
- using namespace llvm;
- #define DEBUG_TYPE "si-load-store-opt"
- namespace {
- enum InstClassEnum {
- UNKNOWN,
- DS_READ,
- DS_WRITE,
- S_BUFFER_LOAD_IMM,
- BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
- BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
- BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
- BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
- BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
- BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
- BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
- BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
- };
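- // Bitmask of the address operand kinds an opcode may carry. getRegs()
- // computes the mask for an opcode, and findMatchingInst() uses it to decide
- // which operands must match between merge candidates.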
- enum RegisterEnum {
- SBASE = 0x1,
- SRSRC = 0x2,
- SOFFSET = 0x4,
- VADDR = 0x8,
- ADDR = 0x10,
- };
- class SILoadStoreOptimizer : public MachineFunctionPass {
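- // State for a single candidate merge: the two instructions, their decoded
- // offsets and widths, and any instructions that must be moved below the
- // merge point to keep the block legal.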
- struct CombineInfo {
- MachineBasicBlock::iterator I;
- MachineBasicBlock::iterator Paired;
- unsigned EltSize;
- unsigned Offset0;
- unsigned Offset1;
- unsigned Width0;
- unsigned Width1;
- unsigned BaseOff;
- InstClassEnum InstClass;
- bool GLC0;
- bool GLC1;
- bool SLC0;
- bool SLC1;
- bool DLC0;
- bool DLC1;
- bool UseST64;
- SmallVector<MachineInstr *, 8> InstsToMove;
- };
- struct BaseRegisters {
- unsigned LoReg = 0;
- unsigned HiReg = 0;
- unsigned LoSubReg = 0;
- unsigned HiSubReg = 0;
- };
- struct MemAddress {
- BaseRegisters Base;
- int64_t Offset = 0;
- };
- using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
- private:
- const GCNSubtarget *STM = nullptr;
- const SIInstrInfo *TII = nullptr;
- const SIRegisterInfo *TRI = nullptr;
- MachineRegisterInfo *MRI = nullptr;
- AliasAnalysis *AA = nullptr;
- bool OptimizeAgain;
- static bool offsetsCanBeCombined(CombineInfo &CI);
- static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
- static unsigned getNewOpcode(const CombineInfo &CI);
- static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
- const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
- unsigned getOpcodeWidth(const MachineInstr &MI) const;
- InstClassEnum getInstClass(unsigned Opc) const;
- unsigned getRegs(unsigned Opc) const;
- bool findMatchingInst(CombineInfo &CI);
- unsigned read2Opcode(unsigned EltSize) const;
- unsigned read2ST64Opcode(unsigned EltSize) const;
- MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);
- unsigned write2Opcode(unsigned EltSize) const;
- unsigned write2ST64Opcode(unsigned EltSize) const;
- MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
- MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
- MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
- MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);
- void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
- int32_t NewOffset) const;
- unsigned computeBase(MachineInstr &MI, const MemAddress &Addr) const;
- MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
- Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
- void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
- /// Promotes constant offset to the immediate by adjusting the base. It
- /// tries to use a base from the nearby instructions that allows it to have
- /// a 13-bit constant offset which gets promoted to the immediate.
- bool promoteConstantOffsetToImm(MachineInstr &CI,
- MemInfoMap &Visited,
- SmallPtrSet<MachineInstr *, 4> &Promoted) const;
- public:
- static char ID;
- SILoadStoreOptimizer() : MachineFunctionPass(ID) {
- initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
- }
- bool optimizeBlock(MachineBasicBlock &MBB);
- bool runOnMachineFunction(MachineFunction &MF) override;
- StringRef getPassName() const override { return "SI Load Store Optimizer"; }
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- AU.addRequired<AAResultsWrapperPass>();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
- };
- } // end anonymous namespace.
- INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
- "SI Load Store Optimizer", false, false)
- INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
- INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
- false, false)
- char SILoadStoreOptimizer::ID = 0;
- char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
- FunctionPass *llvm::createSILoadStoreOptimizerPass() {
- return new SILoadStoreOptimizer();
- }
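- // Move all instructions in \p InstsToMove to immediately after \p I,
- // preserving their relative order.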
- static void moveInstsAfter(MachineBasicBlock::iterator I,
- ArrayRef<MachineInstr *> InstsToMove) {
- MachineBasicBlock *MBB = I->getParent();
- ++I;
- for (MachineInstr *MI : InstsToMove) {
- MI->removeFromParent();
- MBB->insert(I, MI);
- }
- }
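- // Record every register \p MI defines in \p RegDefs, and every physical
- // register it reads in \p PhysRegUses.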
- static void addDefsUsesToList(const MachineInstr &MI,
- DenseSet<unsigned> &RegDefs,
- DenseSet<unsigned> &PhysRegUses) {
- for (const MachineOperand &Op : MI.operands()) {
- if (Op.isReg()) {
- if (Op.isDef())
- RegDefs.insert(Op.getReg());
- else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
- PhysRegUses.insert(Op.getReg());
- }
- }
- }
- static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
- MachineBasicBlock::iterator B,
- AliasAnalysis *AA) {
- // RAW or WAR - cannot reorder
- // WAW - cannot reorder
- // RAR - safe to reorder
- return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
- }
- // Add MI and its defs to the lists if MI reads one of the defs that are
- // already in the list. Returns true in that case.
- static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
- DenseSet<unsigned> &PhysRegUses,
- SmallVectorImpl<MachineInstr *> &Insts) {
- for (MachineOperand &Use : MI.operands()) {
- // If one of the defs is read, then there is a use of Def between I and the
- // instruction that I will potentially be merged with. We will need to move
- // this instruction after the merged instructions.
- //
- // Similarly, if there is a def which is read by an instruction that is to
- // be moved for merging, then we need to move the def-instruction as well.
- // This can only happen for physical registers such as M0; virtual
- // registers are in SSA form.
- if (Use.isReg() &&
- ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
- (Use.isDef() && RegDefs.count(Use.getReg())) ||
- (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
- PhysRegUses.count(Use.getReg())))) {
- Insts.push_back(&MI);
- addDefsUsesToList(MI, RegDefs, PhysRegUses);
- return true;
- }
- }
- return false;
- }
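- // Return true if it is safe to reorder \p MemOp with respect to every
- // memory operation in \p InstsToMove.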
- static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
- ArrayRef<MachineInstr *> InstsToMove,
- AliasAnalysis *AA) {
- assert(MemOp.mayLoadOrStore());
- for (MachineInstr *InstToMove : InstsToMove) {
- if (!InstToMove->mayLoadOrStore())
- continue;
- if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
- return false;
- }
- return true;
- }
- // This function assumes that \p A and \p B are identical except for size and
- // offset, and that they reference adjacent memory.
- static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
- const MachineMemOperand *A,
- const MachineMemOperand *B) {
- unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
- unsigned Size = A->getSize() + B->getSize();
- // This function adds the offset parameter to the existing offset for A,
- // so we pass 0 here as the offset and then manually set it to the correct
- // value after the call.
- MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
- MMO->setOffset(MinOffset);
- return MMO;
- }
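- // Decide whether CI's two offsets can be encoded in a single merged
- // instruction, rewriting CI.Offset0/CI.Offset1 (and, for DS, possibly
- // CI.BaseOff and CI.UseST64) into encoded form. DS offsets are 8-bit
- // element counts; the ST64 forms scale them by a further 64 elements, so
- // e.g. two ds_read_b32 at byte offsets 0 and 8192 can still pair as
- // ds_read2st64_b32 with offset0:0 offset1:32.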
- bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
- // XXX - Would the same offset be OK? Is there any reason this would happen or
- // be useful?
- if (CI.Offset0 == CI.Offset1)
- return false;
- // This won't be valid if the offset isn't aligned.
- if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
- return false;
- unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
- unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
- CI.UseST64 = false;
- CI.BaseOff = 0;
- // Handle SMEM and VMEM instructions.
- if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
- return (EltOffset0 + CI.Width0 == EltOffset1 ||
- EltOffset1 + CI.Width1 == EltOffset0) &&
- CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
- (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
- }
- // If the offset in elements doesn't fit in 8 bits, we might be able to use
- // the stride 64 versions.
- if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
- isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
- CI.Offset0 = EltOffset0 / 64;
- CI.Offset1 = EltOffset1 / 64;
- CI.UseST64 = true;
- return true;
- }
- // Check if the new offsets fit in the reduced 8-bit range.
- if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
- CI.Offset0 = EltOffset0;
- CI.Offset1 = EltOffset1;
- return true;
- }
- // Try to shift base address to decrease offsets.
- unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
- CI.BaseOff = std::min(CI.Offset0, CI.Offset1);
- if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
- CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
- CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
- CI.UseST64 = true;
- return true;
- }
- if (isUInt<8>(OffsetDiff)) {
- CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
- CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
- return true;
- }
- return false;
- }
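- // Check that the combined width can be encoded as a single instruction:
- // SMEM merges must produce an x2 or x4 result, and 3-dword buffer
- // operations require a subtarget with dwordx3 load/store support.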
- bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
- const CombineInfo &CI) {
- const unsigned Width = (CI.Width0 + CI.Width1);
- switch (CI.InstClass) {
- default:
- return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
- case S_BUFFER_LOAD_IMM:
- switch (Width) {
- default:
- return false;
- case 2:
- case 4:
- return true;
- }
- }
- }
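- // Return the width of MI's data in dwords (e.g. 2 for a DWORDX2 access),
- // or 0 for unhandled opcodes.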
- unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) const {
- const unsigned Opc = MI.getOpcode();
- if (TII->isMUBUF(MI)) {
- // FIXME: Handle d16 correctly
- return AMDGPU::getMUBUFElements(Opc);
- }
- switch (Opc) {
- default:
- return 0;
- case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
- return 1;
- case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
- return 2;
- case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
- return 4;
- }
- }
- InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) const {
- if (TII->isMUBUF(Opc)) {
- const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);
- // If we couldn't identify the opcode, bail out.
- if (baseOpcode == -1) {
- return UNKNOWN;
- }
- switch (baseOpcode) {
- default:
- return UNKNOWN;
- case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
- return BUFFER_LOAD_OFFEN;
- case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
- return BUFFER_LOAD_OFFSET;
- case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
- return BUFFER_STORE_OFFEN;
- case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
- return BUFFER_STORE_OFFSET;
- case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
- return BUFFER_LOAD_OFFEN_exact;
- case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
- return BUFFER_LOAD_OFFSET_exact;
- case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
- return BUFFER_STORE_OFFEN_exact;
- case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
- return BUFFER_STORE_OFFSET_exact;
- }
- }
- switch (Opc) {
- default:
- return UNKNOWN;
- case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
- case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
- case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
- return S_BUFFER_LOAD_IMM;
- case AMDGPU::DS_READ_B32:
- case AMDGPU::DS_READ_B64:
- case AMDGPU::DS_READ_B32_gfx9:
- case AMDGPU::DS_READ_B64_gfx9:
- return DS_READ;
- case AMDGPU::DS_WRITE_B32:
- case AMDGPU::DS_WRITE_B64:
- case AMDGPU::DS_WRITE_B32_gfx9:
- case AMDGPU::DS_WRITE_B64_gfx9:
- return DS_WRITE;
- }
- }
- unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) const {
- if (TII->isMUBUF(Opc)) {
- unsigned result = 0;
- if (AMDGPU::getMUBUFHasVAddr(Opc)) {
- result |= VADDR;
- }
- if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
- result |= SRSRC;
- }
- if (AMDGPU::getMUBUFHasSoffset(Opc)) {
- result |= SOFFSET;
- }
- return result;
- }
- switch (Opc) {
- default:
- return 0;
- case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
- case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
- case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
- return SBASE;
- case AMDGPU::DS_READ_B32:
- case AMDGPU::DS_READ_B64:
- case AMDGPU::DS_READ_B32_gfx9:
- case AMDGPU::DS_READ_B64_gfx9:
- case AMDGPU::DS_WRITE_B32:
- case AMDGPU::DS_WRITE_B64:
- case AMDGPU::DS_WRITE_B32_gfx9:
- case AMDGPU::DS_WRITE_B64_gfx9:
- return ADDR;
- }
- }
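- // Scan forward from CI.I for a second instruction that can be merged with
- // it. On success, record it in CI.Paired along with both offsets and
- // widths; CI.InstsToMove collects the intervening instructions that must
- // be moved below the merged instruction.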
- bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
- MachineBasicBlock *MBB = CI.I->getParent();
- MachineBasicBlock::iterator E = MBB->end();
- MachineBasicBlock::iterator MBBI = CI.I;
- const unsigned Opc = CI.I->getOpcode();
- const InstClassEnum InstClass = getInstClass(Opc);
- if (InstClass == UNKNOWN) {
- return false;
- }
- const unsigned Regs = getRegs(Opc);
- unsigned AddrOpName[5] = {0};
- int AddrIdx[5];
- const MachineOperand *AddrReg[5];
- unsigned NumAddresses = 0;
- if (Regs & ADDR) {
- AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
- }
- if (Regs & SBASE) {
- AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
- }
- if (Regs & SRSRC) {
- AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
- }
- if (Regs & SOFFSET) {
- AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
- }
- if (Regs & VADDR) {
- AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
- }
- for (unsigned i = 0; i < NumAddresses; i++) {
- AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
- AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);
- // We only ever merge operations with the same base address register, so
- // don't bother scanning forward if there are no other uses.
- if (AddrReg[i]->isReg() &&
- (Register::isPhysicalRegister(AddrReg[i]->getReg()) ||
- MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
- return false;
- }
- ++MBBI;
- DenseSet<unsigned> RegDefsToMove;
- DenseSet<unsigned> PhysRegUsesToMove;
- addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
- for (; MBBI != E; ++MBBI) {
- const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);
- if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
- (IsDS && (MBBI->getOpcode() != Opc))) {
- // This is not a matching DS instruction, but we can keep looking as
- // long as one of these conditions is met:
- // 1. It is safe to move I down past MBBI.
- // 2. It is safe to move MBBI down past the instruction that I will
- // be merged into.
- if (MBBI->hasUnmodeledSideEffects()) {
- // We can't re-order this instruction with respect to other memory
- // operations, so we fail both conditions mentioned above.
- return false;
- }
- if (MBBI->mayLoadOrStore() &&
- (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
- !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
- // We fail condition #1, but we may still be able to satisfy condition
- // #2. Add this instruction to the move list and then we will check
- // if condition #2 holds once we have selected the matching instruction.
- CI.InstsToMove.push_back(&*MBBI);
- addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
- continue;
- }
- // When we match I with another DS instruction we will be moving I down
- // to the location of the matched instruction, so any uses of I will
- // need to be moved down as well.
- addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
- CI.InstsToMove);
- continue;
- }
- // Don't merge volatiles.
- if (MBBI->hasOrderedMemoryRef())
- return false;
- // Handle a case like
- // DS_WRITE_B32 addr, v, idx0
- // w = DS_READ_B32 addr, idx0
- // DS_WRITE_B32 addr, f(w), idx1
- // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
- // merging of the two writes.
- if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
- CI.InstsToMove))
- continue;
- bool Match = true;
- for (unsigned i = 0; i < NumAddresses; i++) {
- const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);
- if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
- if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
- AddrReg[i]->getImm() != AddrRegNext.getImm()) {
- Match = false;
- break;
- }
- continue;
- }
- // Check same base pointer. Be careful of subregisters, which can occur
- // with vectors of pointers.
- if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
- AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
- Match = false;
- break;
- }
- }
- if (Match) {
- int OffsetIdx =
- AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
- CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
- CI.Width0 = getOpcodeWidth(*CI.I);
- CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
- CI.Width1 = getOpcodeWidth(*MBBI);
- CI.Paired = MBBI;
- if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
- CI.Offset0 &= 0xffff;
- CI.Offset1 &= 0xffff;
- } else {
- CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
- CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
- if (CI.InstClass != S_BUFFER_LOAD_IMM) {
- CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
- CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
- }
- CI.DLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::dlc)->getImm();
- CI.DLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::dlc)->getImm();
- }
- // Check both offsets fit in the reduced range.
- // We also need to go through the list of instructions that we plan to
- // move and make sure they are all safe to move down past the merged
- // instruction.
- if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
- if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
- return true;
- }
- // We've found a load/store that we couldn't merge for some reason.
- // We could potentially keep looking, but we'd need to make sure that
- // it was safe to move I and also all the instructions in InstsToMove
- // down past this instruction.
- // Check if we can move I across MBBI, and if we can move all of I's users.
- if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
- !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
- break;
- }
- return false;
- }
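- // Subtargets that still require M0 to be initialized for LDS access use
- // the original DS opcodes; newer subtargets use the _gfx9 variants, which
- // do not read M0.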
- unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
- if (STM->ldsRequiresM0Init())
- return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
- return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
- }
- unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
- if (STM->ldsRequiresM0Init())
- return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
- return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
- : AMDGPU::DS_READ2ST64_B64_gfx9;
- }
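- // Emit the merged read at the position of the paired instruction, then
- // COPY each half of the new super-register into the original destinations.
- // The result looks roughly like:
- // %dst:vreg_64 = DS_READ2_B32 %addr, 4, 8, 0
- // %d0 = COPY %dst.sub0
- // %d1 = COPY %dst.sub1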
- MachineBasicBlock::iterator
- SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
- MachineBasicBlock *MBB = CI.I->getParent();
- // Be careful, since the addresses could be subregisters themselves in weird
- // cases, like vectors of pointers.
- const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
- const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
- const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);
- unsigned NewOffset0 = CI.Offset0;
- unsigned NewOffset1 = CI.Offset1;
- unsigned Opc =
- CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);
- unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
- unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
- if (NewOffset0 > NewOffset1) {
- // Canonicalize the merged instruction so the smaller offset comes first.
- std::swap(NewOffset0, NewOffset1);
- std::swap(SubRegIdx0, SubRegIdx1);
- }
- assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
- (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
- const MCInstrDesc &Read2Desc = TII->get(Opc);
- const TargetRegisterClass *SuperRC =
- (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
- Register DestReg = MRI->createVirtualRegister(SuperRC);
- DebugLoc DL = CI.I->getDebugLoc();
- Register BaseReg = AddrReg->getReg();
- unsigned BaseSubReg = AddrReg->getSubReg();
- unsigned BaseRegFlags = 0;
- if (CI.BaseOff) {
- Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
- BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
- .addImm(CI.BaseOff);
- BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- BaseRegFlags = RegState::Kill;
- TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
- .addReg(ImmReg)
- .addReg(AddrReg->getReg(), 0, BaseSubReg)
- .addImm(0); // clamp bit
- BaseSubReg = 0;
- }
- MachineInstrBuilder Read2 =
- BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
- .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
- .addImm(NewOffset0) // offset0
- .addImm(NewOffset1) // offset1
- .addImm(0) // gds
- .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
- (void)Read2;
- const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
- // Copy to the old destination registers.
- BuildMI(*MBB, CI.Paired, DL, CopyDesc)
- .add(*Dest0) // Copy to same destination including flags and sub reg.
- .addReg(DestReg, 0, SubRegIdx0);
- MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
- .add(*Dest1)
- .addReg(DestReg, RegState::Kill, SubRegIdx1);
- moveInstsAfter(Copy1, CI.InstsToMove);
- MachineBasicBlock::iterator Next = std::next(CI.I);
- CI.I->eraseFromParent();
- CI.Paired->eraseFromParent();
- LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
- return Next;
- }
- unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
- if (STM->ldsRequiresM0Init())
- return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
- return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
- : AMDGPU::DS_WRITE2_B64_gfx9;
- }
- unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
- if (STM->ldsRequiresM0Init())
- return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
- : AMDGPU::DS_WRITE2ST64_B64;
- return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
- : AMDGPU::DS_WRITE2ST64_B64_gfx9;
- }
- MachineBasicBlock::iterator
- SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
- MachineBasicBlock *MBB = CI.I->getParent();
- // Be sure to use .add(), and not .addReg() with these. We want to be sure
- // we preserve the subregister index and any register flags set on them.
- const MachineOperand *AddrReg =
- TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
- const MachineOperand *Data0 =
- TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
- const MachineOperand *Data1 =
- TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);
- unsigned NewOffset0 = CI.Offset0;
- unsigned NewOffset1 = CI.Offset1;
- unsigned Opc =
- CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);
- if (NewOffset0 > NewOffset1) {
- // Canonicalize the merged instruction so the smaller offset comes first.
- std::swap(NewOffset0, NewOffset1);
- std::swap(Data0, Data1);
- }
- assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
- (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
- const MCInstrDesc &Write2Desc = TII->get(Opc);
- DebugLoc DL = CI.I->getDebugLoc();
- Register BaseReg = AddrReg->getReg();
- unsigned BaseSubReg = AddrReg->getSubReg();
- unsigned BaseRegFlags = 0;
- if (CI.BaseOff) {
- Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
- BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
- .addImm(CI.BaseOff);
- BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- BaseRegFlags = RegState::Kill;
- TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
- .addReg(ImmReg)
- .addReg(AddrReg->getReg(), 0, BaseSubReg)
- .addImm(0); // clamp bit
- BaseSubReg = 0;
- }
- MachineInstrBuilder Write2 =
- BuildMI(*MBB, CI.Paired, DL, Write2Desc)
- .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
- .add(*Data0) // data0
- .add(*Data1) // data1
- .addImm(NewOffset0) // offset0
- .addImm(NewOffset1) // offset1
- .addImm(0) // gds
- .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
- moveInstsAfter(Write2, CI.InstsToMove);
- MachineBasicBlock::iterator Next = std::next(CI.I);
- CI.I->eraseFromParent();
- CI.Paired->eraseFromParent();
- LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
- return Next;
- }
- MachineBasicBlock::iterator
- SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
- MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
- const unsigned Opcode = getNewOpcode(CI);
- const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
- Register DestReg = MRI->createVirtualRegister(SuperRC);
- unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
- // It shouldn't be possible to get this far if the two instructions
- // don't have a single memoperand, because MachineInstr::mayAlias()
- // will return true if this is the case.
- assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());
- const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
- const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();
- BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
- .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
- .addImm(MergedOffset) // offset
- .addImm(CI.GLC0) // glc
- .addImm(CI.DLC0) // dlc
- .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
- std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
- const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
- const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
- // Copy to the old destination registers.
- const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
- const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
- const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);
- BuildMI(*MBB, CI.Paired, DL, CopyDesc)
- .add(*Dest0) // Copy to same destination including flags and sub reg.
- .addReg(DestReg, 0, SubRegIdx0);
- MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
- .add(*Dest1)
- .addReg(DestReg, RegState::Kill, SubRegIdx1);
- moveInstsAfter(Copy1, CI.InstsToMove);
- MachineBasicBlock::iterator Next = std::next(CI.I);
- CI.I->eraseFromParent();
- CI.Paired->eraseFromParent();
- return Next;
- }
- MachineBasicBlock::iterator
- SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
- MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
- const unsigned Opcode = getNewOpcode(CI);
- const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
- // Allocate the merged destination register.
- Register DestReg = MRI->createVirtualRegister(SuperRC);
- unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
- auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);
- const unsigned Regs = getRegs(Opcode);
- if (Regs & VADDR)
- MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
- // It shouldn't be possible to get this far if the two instructions
- // don't have a single memoperand, because MachineInstr::mayAlias()
- // will return true if this is the case.
- assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());
- const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
- const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();
- MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
- .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
- .addImm(MergedOffset) // offset
- .addImm(CI.GLC0) // glc
- .addImm(CI.SLC0) // slc
- .addImm(0) // tfe
- .addImm(CI.DLC0) // dlc
- .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
- std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
- const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
- const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
- // Copy to the old destination registers.
- const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
- const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
- const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
- BuildMI(*MBB, CI.Paired, DL, CopyDesc)
- .add(*Dest0) // Copy to same destination including flags and sub reg.
- .addReg(DestReg, 0, SubRegIdx0);
- MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
- .add(*Dest1)
- .addReg(DestReg, RegState::Kill, SubRegIdx1);
- moveInstsAfter(Copy1, CI.InstsToMove);
- MachineBasicBlock::iterator Next = std::next(CI.I);
- CI.I->eraseFromParent();
- CI.Paired->eraseFromParent();
- return Next;
- }
- unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
- const unsigned Width = CI.Width0 + CI.Width1;
- switch (CI.InstClass) {
- default:
- // FIXME: Handle d16 correctly
- return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
- case UNKNOWN:
- llvm_unreachable("Unknown instruction class");
- case S_BUFFER_LOAD_IMM:
- switch (Width) {
- default:
- return 0;
- case 2:
- return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
- case 4:
- return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
- }
- }
- }
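- // Return the subregister indices with which each original result is
- // extracted from (or, for stores, packed into) the merged register. The
- // access with the lower offset owns the low subregisters; e.g. Width0 = 1,
- // Width1 = 2 with Offset0 < Offset1 yields (sub0, sub1_sub2).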
- std::pair<unsigned, unsigned>
- SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
- if (CI.Offset0 > CI.Offset1) {
- switch (CI.Width0) {
- default:
- return std::make_pair(0, 0);
- case 1:
- switch (CI.Width1) {
- default:
- return std::make_pair(0, 0);
- case 1:
- return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
- case 2:
- return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
- case 3:
- return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
- }
- case 2:
- switch (CI.Width1) {
- default:
- return std::make_pair(0, 0);
- case 1:
- return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
- case 2:
- return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
- }
- case 3:
- switch (CI.Width1) {
- default:
- return std::make_pair(0, 0);
- case 1:
- return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
- }
- }
- } else {
- switch (CI.Width0) {
- default:
- return std::make_pair(0, 0);
- case 1:
- switch (CI.Width1) {
- default:
- return std::make_pair(0, 0);
- case 1:
- return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
- case 2:
- return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
- case 3:
- return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
- }
- case 2:
- switch (CI.Width1) {
- default:
- return std::make_pair(0, 0);
- case 1:
- return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
- case 2:
- return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
- }
- case 3:
- switch (CI.Width1) {
- default:
- return std::make_pair(0, 0);
- case 1:
- return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
- }
- }
- }
- }
- const TargetRegisterClass *
- SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
- if (CI.InstClass == S_BUFFER_LOAD_IMM) {
- switch (CI.Width0 + CI.Width1) {
- default:
- return nullptr;
- case 2:
- return &AMDGPU::SReg_64_XEXECRegClass;
- case 4:
- return &AMDGPU::SReg_128RegClass;
- case 8:
- return &AMDGPU::SReg_256RegClass;
- case 16:
- return &AMDGPU::SReg_512RegClass;
- }
- } else {
- switch (CI.Width0 + CI.Width1) {
- default:
- return nullptr;
- case 2:
- return &AMDGPU::VReg_64RegClass;
- case 3:
- return &AMDGPU::VReg_96RegClass;
- case 4:
- return &AMDGPU::VReg_128RegClass;
- }
- }
- }
- MachineBasicBlock::iterator
- SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
- MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
- const unsigned Opcode = getNewOpcode(CI);
- std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
- const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
- const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
- // Copy to the new source register.
- const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
- Register SrcReg = MRI->createVirtualRegister(SuperRC);
- const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
- const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
- BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
- .add(*Src0)
- .addImm(SubRegIdx0)
- .add(*Src1)
- .addImm(SubRegIdx1);
- auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
- .addReg(SrcReg, RegState::Kill);
- const unsigned Regs = getRegs(Opcode);
- if (Regs & VADDR)
- MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
- // It shouldn't be possible to get this far if the two instructions
- // don't have a single memoperand, because MachineInstr::mayAlias()
- // will return true if this is the case.
- assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());
- const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
- const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();
- MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
- .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
- .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
- .addImm(CI.GLC0) // glc
- .addImm(CI.SLC0) // slc
- .addImm(0) // tfe
- .addImm(CI.DLC0) // dlc
- .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
- moveInstsAfter(MIB, CI.InstsToMove);
- MachineBasicBlock::iterator Next = std::next(CI.I);
- CI.I->eraseFromParent();
- CI.Paired->eraseFromParent();
- return Next;
- }
- MachineOperand
- SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
- APInt V(32, Val, true);
- if (TII->isInlineConstant(V))
- return MachineOperand::CreateImm(Val);
- Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
- MachineInstr *Mov =
- BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
- TII->get(AMDGPU::S_MOV_B32), Reg)
- .addImm(Val);
- (void)Mov;
- LLVM_DEBUG(dbgs() << " "; Mov->dump());
- return MachineOperand::CreateReg(Reg, false);
- }
- // Compute base address using Addr and return the final register.
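- // The 64-bit add is expanded as a V_ADD_I32_e64 on the low half that
- // defines a carry, a V_ADDC_U32_e64 on the high half that consumes it, and
- // a REG_SEQUENCE that recombines the two halves.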
- unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
- const MemAddress &Addr) const {
- MachineBasicBlock *MBB = MI.getParent();
- MachineBasicBlock::iterator MBBI = MI.getIterator();
- DebugLoc DL = MI.getDebugLoc();
- assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
- Addr.Base.LoSubReg) &&
- "Expected 32-bit Base-Register-Low!!");
- assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
- Addr.Base.HiSubReg) &&
- "Expected 32-bit Base-Register-Hi!!");
- LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n");
- MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
- MachineOperand OffsetHi =
- createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
- const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
- Register CarryReg = MRI->createVirtualRegister(CarryRC);
- Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);
- Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- MachineInstr *LoHalf =
- BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
- .addReg(CarryReg, RegState::Define)
- .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
- .add(OffsetLo)
- .addImm(0); // clamp bit
- (void)LoHalf;
- LLVM_DEBUG(dbgs() << " "; LoHalf->dump(););
- MachineInstr *HiHalf =
- BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
- .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
- .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
- .add(OffsetHi)
- .addReg(CarryReg, RegState::Kill)
- .addImm(0); // clamp bit
- (void)HiHalf;
- LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););
- Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
- MachineInstr *FullBase =
- BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
- .addReg(DestSub0)
- .addImm(AMDGPU::sub0)
- .addReg(DestSub1)
- .addImm(AMDGPU::sub1);
- (void)FullBase;
- LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";);
- return FullDestReg;
- }
- // Update base and offset with the NewBase and NewOffset in MI.
- void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
- unsigned NewBase,
- int32_t NewOffset) const {
- TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
- TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
- }
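- // Return the constant that \p Op materializes: either its immediate value,
- // or the immediate of the S_MOV_B32 that uniquely defines it.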
- Optional<int32_t>
- SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
- if (Op.isImm())
- return Op.getImm();
- if (!Op.isReg())
- return None;
- MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
- if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
- !Def->getOperand(1).isImm())
- return None;
- return Def->getOperand(1).getImm();
- }
- // Analyze Base and extract:
- // - 32-bit base registers and subregisters
- // - a 64-bit constant offset
- // The base computation is expected to look like:
- // %OFFSET0:sgpr_32 = S_MOV_B32 8000
- // %LO:vgpr_32, %c:sreg_64_xexec =
- // V_ADD_I32_e64 %BASE_LO:vgpr_32, %OFFSET0:sgpr_32
- // %HI:vgpr_32 = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
- // %Base:vreg_64 =
- // REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
- void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
- MemAddress &Addr) const {
- if (!Base.isReg())
- return;
- MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
- if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
- || Def->getNumOperands() != 5)
- return;
- MachineOperand BaseLo = Def->getOperand(1);
- MachineOperand BaseHi = Def->getOperand(3);
- if (!BaseLo.isReg() || !BaseHi.isReg())
- return;
- MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
- MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
- if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
- !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
- return;
- const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
- const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
- auto Offset0P = extractConstOffset(*Src0);
- if (Offset0P)
- BaseLo = *Src1;
- else {
- if (!(Offset0P = extractConstOffset(*Src1)))
- return;
- BaseLo = *Src0;
- }
- Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
- Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
- if (Src0->isImm())
- std::swap(Src0, Src1);
- if (!Src1->isImm())
- return;
- uint64_t Offset1 = Src1->getImm();
- BaseHi = *Src0;
- Addr.Base.LoReg = BaseLo.getReg();
- Addr.Base.HiReg = BaseHi.getReg();
- Addr.Base.LoSubReg = BaseLo.getSubReg();
- Addr.Base.HiSubReg = BaseHi.getSubReg();
- Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
- }
- bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
- MachineInstr &MI,
- MemInfoMap &Visited,
- SmallPtrSet<MachineInstr *, 4> &AnchorList) const {
- if (!(MI.mayLoad() ^ MI.mayStore()))
- return false;
- // TODO: Support flat and scratch.
- if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
- return false;
- if (MI.mayLoad() && TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
- return false;
- if (AnchorList.count(&MI))
- return false;
- LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
- if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
- LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
- return false;
- }
- // Step1: Find the base-registers and a 64bit constant offset.
- MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
- MemAddress MAddr;
- if (Visited.find(&MI) == Visited.end()) {
- processBaseWithConstOffset(Base, MAddr);
- Visited[&MI] = MAddr;
- } else
- MAddr = Visited[&MI];
- if (MAddr.Offset == 0) {
- LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
- " constant offsets that can be promoted.\n";);
- return false;
- }
- LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
- << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
- // Step2: Traverse through MI's basic block and find an anchor (an
- // instruction with the same base registers) at the highest legal 13-bit
- // distance from MI's offset.
- // E.g. (64bit loads)
- // bb:
- // addr1 = &a + 4096; load1 = load(addr1, 0)
- // addr2 = &a + 6144; load2 = load(addr2, 0)
- // addr3 = &a + 8192; load3 = load(addr3, 0)
- // addr4 = &a + 10240; load4 = load(addr4, 0)
- // addr5 = &a + 12288; load5 = load(addr5, 0)
- //
- // Starting from the first load, the optimization will try to find a new base
- // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
- // have a 13-bit distance from &a + 4096. The heuristic considers &a + 8192
- // as the new base (anchor) because the maximum distance can presumably
- // accommodate more intermediate bases.
- //
- // Step3: move (&a + 8192) above load1. Compute and promote offsets from
- // (&a + 8192) for load1, load2, load4.
- // addr = &a + 8192
- // load1 = load(addr, -4096)
- // load2 = load(addr, -2048)
- // load3 = load(addr, 0)
- // load4 = load(addr, 2048)
- // addr5 = &a + 12288; load5 = load(addr5, 0)
- //
- MachineInstr *AnchorInst = nullptr;
- MemAddress AnchorAddr;
- uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
- SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;
- MachineBasicBlock *MBB = MI.getParent();
- MachineBasicBlock::iterator E = MBB->end();
- MachineBasicBlock::iterator MBBI = MI.getIterator();
- ++MBBI;
- const SITargetLowering *TLI =
- static_cast<const SITargetLowering *>(STM->getTargetLowering());
- for ( ; MBBI != E; ++MBBI) {
- MachineInstr &MINext = *MBBI;
- // TODO: Support finding an anchor(with same base) from store addresses or
- // any other load addresses where the opcodes are different.
- if (MINext.getOpcode() != MI.getOpcode() ||
- TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
- continue;
- const MachineOperand &BaseNext =
- *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
- MemAddress MAddrNext;
- if (Visited.find(&MINext) == Visited.end()) {
- processBaseWithConstOffset(BaseNext, MAddrNext);
- Visited[&MINext] = MAddrNext;
- } else
- MAddrNext = Visited[&MINext];
- if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
- MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
- MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
- MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
- continue;
- InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
- int64_t Dist = MAddr.Offset - MAddrNext.Offset;
- TargetLoweringBase::AddrMode AM;
- AM.HasBaseReg = true;
- AM.BaseOffs = Dist;
- if (TLI->isLegalGlobalAddressingMode(AM) &&
- (uint32_t)std::abs(Dist) > MaxDist) {
- MaxDist = std::abs(Dist);
- AnchorAddr = MAddrNext;
- AnchorInst = &MINext;
- }
- }
- if (AnchorInst) {
- LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): ";
- AnchorInst->dump());
- LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: "
- << AnchorAddr.Offset << "\n\n");
- // Instead of moving up, just re-compute the anchor instruction's base address.
- unsigned Base = computeBase(MI, AnchorAddr);
- updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
- LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump(););
- for (auto P : InstsWCommonBase) {
- TargetLoweringBase::AddrMode AM;
- AM.HasBaseReg = true;
- AM.BaseOffs = P.second - AnchorAddr.Offset;
- if (TLI->isLegalGlobalAddressingMode(AM)) {
- LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second;
- dbgs() << ")"; P.first->dump());
- updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
- LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump());
- }
- }
- AnchorList.insert(AnchorInst);
- return true;
- }
- return false;
- }
- // Scan through looking for adjacent LDS, SMEM, and buffer operations with
- // constant offsets from the same base register. We rely on the scheduler to
- // do the hard work of clustering nearby loads, and assume these are all
- // adjacent.
- bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
- bool Modified = false;
- // Maps each visited instruction to its decomposed base registers and offset.
- MemInfoMap Visited;
- // Contains the list of instructions for which constant offsets are being
- // promoted to the IMM.
- SmallPtrSet<MachineInstr *, 4> AnchorList;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
- MachineInstr &MI = *I;
- if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
- Modified = true;
- // Don't combine if volatile.
- if (MI.hasOrderedMemoryRef()) {
- ++I;
- continue;
- }
- const unsigned Opc = MI.getOpcode();
- CombineInfo CI;
- CI.I = I;
- CI.InstClass = getInstClass(Opc);
- switch (CI.InstClass) {
- default:
- break;
- case DS_READ:
- CI.EltSize =
- (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
- : 4;
- if (findMatchingInst(CI)) {
- Modified = true;
- I = mergeRead2Pair(CI);
- } else {
- ++I;
- }
- continue;
- case DS_WRITE:
- CI.EltSize =
- (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
- : 4;
- if (findMatchingInst(CI)) {
- Modified = true;
- I = mergeWrite2Pair(CI);
- } else {
- ++I;
- }
- continue;
- case S_BUFFER_LOAD_IMM:
- CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
- if (findMatchingInst(CI)) {
- Modified = true;
- I = mergeSBufferLoadImmPair(CI);
- OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
- } else {
- ++I;
- }
- continue;
- case BUFFER_LOAD_OFFEN:
- case BUFFER_LOAD_OFFSET:
- case BUFFER_LOAD_OFFEN_exact:
- case BUFFER_LOAD_OFFSET_exact:
- CI.EltSize = 4;
- if (findMatchingInst(CI)) {
- Modified = true;
- I = mergeBufferLoadPair(CI);
- OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
- } else {
- ++I;
- }
- continue;
- case BUFFER_STORE_OFFEN:
- case BUFFER_STORE_OFFSET:
- case BUFFER_STORE_OFFEN_exact:
- case BUFFER_STORE_OFFSET_exact:
- CI.EltSize = 4;
- if (findMatchingInst(CI)) {
- Modified = true;
- I = mergeBufferStorePair(CI);
- OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
- } else {
- ++I;
- }
- continue;
- }
- ++I;
- }
- return Modified;
- }
- bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(MF.getFunction()))
- return false;
- STM = &MF.getSubtarget<GCNSubtarget>();
- if (!STM->loadStoreOptEnabled())
- return false;
- TII = STM->getInstrInfo();
- TRI = &TII->getRegisterInfo();
- MRI = &MF.getRegInfo();
- AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- assert(MRI->isSSA() && "Must be run on SSA");
- LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
- bool Modified = false;
- for (MachineBasicBlock &MBB : MF) {
- do {
- OptimizeAgain = false;
- Modified |= optimizeBlock(MBB);
- } while (OptimizeAgain);
- }
- return Modified;
- }