//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
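// For example (an illustrative sketch, not output from a specific test), a
// vector addition such as
//
//   %res = add <2 x i32> %a, %b
//
// is replaced by one scalar addition per element, with the vector result
// recreated from the scalar results only if it is still needed:
//
//   %a.i0 = extractelement <2 x i32> %a, i32 0
//   %b.i0 = extractelement <2 x i32> %b, i32 0
//   %res.i0 = add i32 %a.i0, %b.i0
//   %a.i1 = extractelement <2 x i32> %a, i32 1
//   %b.i1 = extractelement <2 x i32> %b, i32 1
//   %res.i1 = add i32 %a.i1, %b.i1
//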
//===----------------------------------------------------------------------===//

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Options.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/Scalarizer.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"
// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool>
    ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
                       cl::desc("Allow the scalarizer pass to scalarize "
                                "loads and stores"));
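
// Loads and stores can then be scalarized from the command line with
// something like:
//
//   opt -scalarizer -scalarize-load-store -S in.ll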

namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
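//
// For example (a sketch): scattering a <2 x i32> value %v and asking for
// component 1 creates "%v.i1 = extractelement <2 x i32> %v, i32 1" on first
// use and returns the cached %v.i1 on subsequent uses.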
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
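  // For example, for a <4 x i32> vector with VecAlign == 8 and ElemSize == 4,
  // element 0 is 8-byte aligned, element 1 is only 4-byte aligned
  // (MinAlign(8, 4) == 4), and element 2 is 8-byte aligned again.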
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &CI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadata(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;
  unsigned ParallelLoopAccessMDKind;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    if (!CV[0]) {
      Type *Ty =
          PointerType::get(PtrTy->getElementType()->getVectorElementType(),
                           PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(nullptr, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
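    //
    // For example (a sketch): given
    //   %v1 = insertelement <2 x i32> %v0, i32 %x, i32 1
    // a lookup of component 1 returns %x directly, while a lookup of
    // component 0 walks past %v1 to %v0 and extracts the element from there.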
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadata(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr)
        continue;
      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadata(Instruction *Op,
                                         const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
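//
// For example, an <8 x i1> vector is rejected by the full-byte check below:
// an i1 element has a type size of 1 bit but a store size of 8 bits, so
// per-element loads and stores would not partition the vector's bytes.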
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout,
                                        const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (DL.getTypeSizeInBits(Layout.ElemTy) !=
      DL.getTypeStoreSizeInBits(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
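//
// For example (a sketch), splitting
//   %res = fadd <2 x float> %x, %y
// produces the scalar operations
//   %res.i0 = fadd float %x.i0, %y.i0
//   %res.i1 = fadd float %x.i1, %y.i1
// where %x.i0 and friends are the scattered components of the operands.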
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalarizable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
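//
// For example, for ID == Intrinsic::sqrt and Ty == <4 x float>, this returns
// the declaration of llvm.sqrt.f32, the scalar counterpart of
// llvm.sqrt.v4f32.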
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}

/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element, if the intrinsic allows it.
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  // Assumes that any vector operand type has the same number of elements as
  // the return vector type, which is true for all current intrinsics.
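  // Scalar operands are passed through unscattered; e.g. the i32 exponent
  // of llvm.powi stays a single scalar argument in every per-element call.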
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those
  // cases, splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);
    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);
    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
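    //
    // For example (a sketch), for <2 x i64> -> <4 x i32>, each i64 source
    // component is bitcast to <2 x i32> and its two i32 elements become
    // consecutive components of the result.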
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
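    //
    // For example (a sketch), for <4 x i32> -> <2 x i64>, each pair of i32
    // components is reassembled into a <2 x i32> with insertelements and
    // then bitcast to a single i64 result component.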
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
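//
// For example (a sketch), a still-used <2 x i32> result %res is rebuilt as
//   %res.upto0 = insertelement <2 x i32> undef, i32 %res.i0, i32 0
//   %res.upto1 = insertelement <2 x i32> %res.upto0, i32 %res.i1, i32 1
// with the final insertelement taking over the name %res before all
// remaining uses of the original instruction are replaced.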
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;

  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F, FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  bool Changed = Impl.visit(F);
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}