  1. //===- InterleavedLoadCombine.cpp - Combine Interleaved Loads ---*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // \file
  11. //
  12. // This file defines the interleaved-load-combine pass. The pass searches for
  13. // ShuffleVectorInst instructions that perform interleaved loads. If a matching
  14. // pattern is found, it adds a combined load and further instructions in a
  15. // pattern that is detectable by the InterleavedAccessPass. The old instructions
  16. // are left dead to be removed later. The pass is specifically designed to be
  17. // executed just before the InterleavedAccessPass to find any left-over
  18. // instances that were not detected by earlier passes.
  19. //
  20. //===----------------------------------------------------------------------===//
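// Editor's note: an illustrative sketch (not taken from the LLVM sources or
// test suite; all value names are hypothetical) of the kind of IR this pass
// rewrites, here with an interleave factor of 2:
//
//   %v0   = load <4 x float>, <4 x float>* %ptr
//   %ptr1 = getelementptr <4 x float>, <4 x float>* %ptr, i64 1
//   %v1   = load <4 x float>, <4 x float>* %ptr1
//   %even = shufflevector <4 x float> %v0, <4 x float> %v1,
//                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
//   %odd  = shufflevector <4 x float> %v0, <4 x float> %v1,
//                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>
//
// The pass adds a single wide load plus strided shuffles that the
// InterleavedAccessPass can recognize, and leaves the old instructions dead:
//
//   %wide.ptr = bitcast <4 x float>* %ptr to <8 x float>*
//   %wide     = load <8 x float>, <8 x float>* %wide.ptr
//   %even.new = shufflevector <8 x float> %wide, <8 x float> undef,
//                             <4 x i32> <i32 0, i32 2, i32 4, i32 6>
//   %odd.new  = shufflevector <8 x float> %wide, <8 x float> undef,
//                             <4 x i32> <i32 1, i32 3, i32 5, i32 7>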
  21. #include "llvm/ADT/Statistic.h"
  22. #include "llvm/Analysis/MemoryLocation.h"
  23. #include "llvm/Analysis/MemorySSA.h"
  24. #include "llvm/Analysis/MemorySSAUpdater.h"
  25. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  26. #include "llvm/Analysis/TargetTransformInfo.h"
  27. #include "llvm/CodeGen/Passes.h"
  28. #include "llvm/CodeGen/TargetLowering.h"
  29. #include "llvm/CodeGen/TargetPassConfig.h"
  30. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  31. #include "llvm/IR/DataLayout.h"
  32. #include "llvm/IR/Dominators.h"
  33. #include "llvm/IR/Function.h"
  #include "llvm/IR/IRBuilder.h"
  34. #include "llvm/IR/Instructions.h"
  35. #include "llvm/IR/LegacyPassManager.h"
  36. #include "llvm/IR/Module.h"
  37. #include "llvm/Pass.h"
  38. #include "llvm/Support/Debug.h"
  39. #include "llvm/Support/ErrorHandling.h"
  40. #include "llvm/Support/raw_ostream.h"
  41. #include "llvm/Target/TargetMachine.h"
  42. #include <algorithm>
  43. #include <cassert>
  44. #include <list>
  45. using namespace llvm;
  46. #define DEBUG_TYPE "interleaved-load-combine"
  47. namespace {
  48. /// Statistic counter
  49. STATISTIC(NumInterleavedLoadCombine, "Number of combined loads");
  50. /// Option to disable the pass
  51. static cl::opt<bool> DisableInterleavedLoadCombine(
  52. "disable-" DEBUG_TYPE, cl::init(false), cl::Hidden,
  53. cl::desc("Disable combining of interleaved loads"));
  54. struct VectorInfo;
  55. struct InterleavedLoadCombineImpl {
  56. public:
  57. InterleavedLoadCombineImpl(Function &F, DominatorTree &DT, MemorySSA &MSSA,
  58. TargetMachine &TM)
  59. : F(F), DT(DT), MSSA(MSSA),
  60. TLI(*TM.getSubtargetImpl(F)->getTargetLowering()),
  61. TTI(TM.getTargetTransformInfo(F)) {}
  62. /// Scan the function for interleaved load candidates and execute the
  63. /// replacement if applicable.
  64. bool run();
  65. private:
  66. /// Function this pass is working on
  67. Function &F;
  68. /// Dominator Tree Analysis
  69. DominatorTree &DT;
  71. /// MemorySSA analysis
  71. MemorySSA &MSSA;
  72. /// Target Lowering Information
  73. const TargetLowering &TLI;
  74. /// Target Transform Information
  75. const TargetTransformInfo TTI;
  76. /// Find the instruction in set LIs that dominates all others; return nullptr
  77. /// if there is none.
  78. LoadInst *findFirstLoad(const std::set<LoadInst *> &LIs);
  79. /// Replace interleaved load candidates. It does additional
  80. /// analyses if this makes sense. Returns true on success and false
  81. /// if nothing has been changed.
  82. bool combine(std::list<VectorInfo> &InterleavedLoad,
  83. OptimizationRemarkEmitter &ORE);
  84. /// Given a set of VectorInfo containing candidates for a given interleave
  85. /// factor, find a set that represents a 'factor' interleaved load.
  86. bool findPattern(std::list<VectorInfo> &Candidates,
  87. std::list<VectorInfo> &InterleavedLoad, unsigned Factor,
  88. const DataLayout &DL);
  89. }; // InterleavedLoadCombine
  90. /// First Order Polynomial on an n-Bit Integer Value
  91. ///
  92. /// Polynomial(Value) = Value * B + A + E*2^(n-e)
  93. ///
  94. /// A and B are the coefficients. E*2^(n-e) is an error within 'e' most
  95. /// significant bits. It is introduced if an exact computation cannot be proven
  96. /// (e.g. division by 2).
  97. ///
  98. /// As part of this optimization multiple loads will be combined. It is necessary
  99. /// to prove that loads are within some relative offset to each other. This
  100. /// class is used to prove relative offsets of values loaded from memory.
  101. ///
  102. /// Representing an integer in this form is sound since addition in two's
  103. /// complement is associative (trivial) and multiplication distributes over the
  104. /// addition (see Proof(1) in Polynomial::mul). Further, both operations
  105. /// commute.
  106. //
  107. // Example:
  108. // declare @fn(i64 %IDX, <4 x float>* %PTR) {
  109. // %Pa1 = add i64 %IDX, 2
  110. // %Pa2 = lshr i64 %Pa1, 1
  111. // %Pa3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pa2
  112. // %Va = load <4 x float>, <4 x float>* %Pa3
  113. //
  114. // %Pb1 = add i64 %IDX, 4
  115. // %Pb2 = lshr i64 %Pb1, 1
  116. // %Pb3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pb2
  117. // %Vb = load <4 x float>, <4 x float>* %Pb3
  118. // ... }
  119. //
  120. // The goal is to prove that two loads load consecutive addresses.
  121. //
  122. // In this case the polynomials are constructed by the following
  123. // steps.
  124. //
  125. // The number tag #e specifies the error bits.
  126. //
  127. // Pa_0 = %IDX #0
  128. // Pa_1 = %IDX + 2 #0 | add 2
  129. // Pa_2 = %IDX/2 + 1 #1 | lshr 1
  130. // Pa_3 = %IDX/2 + 1 #1 | GEP, step signext to i64
  131. // Pa_4 = (%IDX/2)*16 + 16 #0 | GEP, multiply index by sizeof(4) for floats
  132. // Pa_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  133. //
  134. // Pb_0 = %IDX #0
  135. // Pb_1 = %IDX + 4 #0 | add 4
  136. // Pb_2 = %IDX/2 + 2 #1 | lshr 1
  137. // Pb_3 = %IDX/2 + 2 #1 | GEP, step signext to i64
  138. // Pb_4 = (%IDX/2)*16 + 32 #0 | GEP, multiply index by sizeof(4) for floats
  139. // Pb_5 = (%IDX/2)*16 + 32 #0 | GEP, add offset of leading components
  140. //
  141. // Pb_5 - Pa_5 = 16 #0 | subtract to get the offset
  142. //
  143. // Remark: %PTR is not maintained within this class. So in this instance the
  144. // offset of 16 can only be assumed if the pointers are equal.
  145. //
  146. class Polynomial {
  147. /// Operations on B
  148. enum BOps {
  149. LShr,
  150. Mul,
  151. SExt,
  152. Trunc,
  153. };
  154. /// Number of Error Bits e
  155. unsigned ErrorMSBs;
  156. /// Value
  157. Value *V;
  158. /// Coefficient B
  159. SmallVector<std::pair<BOps, APInt>, 4> B;
  160. /// Coefficient A
  161. APInt A;
  162. public:
  163. Polynomial(Value *V) : ErrorMSBs((unsigned)-1), V(V), B(), A() {
  164. IntegerType *Ty = dyn_cast<IntegerType>(V->getType());
  165. if (Ty) {
  166. ErrorMSBs = 0;
  167. this->V = V;
  168. A = APInt(Ty->getBitWidth(), 0);
  169. }
  170. }
  171. Polynomial(const APInt &A, unsigned ErrorMSBs = 0)
  172. : ErrorMSBs(ErrorMSBs), V(nullptr), B(), A(A) {}
  173. Polynomial(unsigned BitWidth, uint64_t A, unsigned ErrorMSBs = 0)
  174. : ErrorMSBs(ErrorMSBs), V(nullptr), B(), A(BitWidth, A) {}
  175. Polynomial() : ErrorMSBs((unsigned)-1), V(nullptr), B(), A() {}
  176. /// Increment and clamp the number of undefined bits.
  177. void incErrorMSBs(unsigned amt) {
  178. if (ErrorMSBs == (unsigned)-1)
  179. return;
  180. ErrorMSBs += amt;
  181. if (ErrorMSBs > A.getBitWidth())
  182. ErrorMSBs = A.getBitWidth();
  183. }
  184. /// Decrement and clamp the number of undefined bits.
  185. void decErrorMSBs(unsigned amt) {
  186. if (ErrorMSBs == (unsigned)-1)
  187. return;
  188. if (ErrorMSBs > amt)
  189. ErrorMSBs -= amt;
  190. else
  191. ErrorMSBs = 0;
  192. }
  193. /// Apply an add on the polynomial
  194. Polynomial &add(const APInt &C) {
  195. // Note: Addition is associative in two's complement even in the case of
  196. // signed overflow.
  197. //
  198. // Error bits can only propagate into higher significant bits. As these are
  199. // already regarded as undefined, there is no change.
  200. //
  201. // Theorem: Adding a constant to a polynomial does not change the error
  202. // term.
  203. //
  204. // Proof:
  205. //
  206. // Since the addition is associative and commutes:
  207. //
  208. // (B + A + E*2^(n-e)) + C = B + (A + C) + E*2^(n-e)
  209. // [qed]
  210. if (C.getBitWidth() != A.getBitWidth()) {
  211. ErrorMSBs = (unsigned)-1;
  212. return *this;
  213. }
  214. A += C;
  215. return *this;
  216. }
  217. /// Apply a multiplication onto the polynomial.
  218. Polynomial &mul(const APInt &C) {
  219. // Note: Multiplication distributes over the addition
  220. //
  221. // Theorem: Multiplication distributes over the addition
  222. //
  223. // Proof(1):
  224. //
  225. // (B+A)*C =
  226. // = (B + A) + (B + A) + .. {C Times}
  227. // addition is associative and commutes, hence
  228. // = B + B + .. {C Times} .. + A + A + .. {C times}
  229. // = B*C + A*C
  230. // (see (function add) for signed values and overflows)
  231. // [qed]
  232. //
  233. // Theorem: If C has c trailing zeros, error bits in A or B are shifted out
  234. // to the left.
  235. //
  236. // Proof(2):
  237. //
  238. // Let B' and A' be the n-Bit inputs with some unknown errors EA,
  239. // EB at e leading bits. B' and A' can be written down as:
  240. //
  241. // B' = B + 2^(n-e)*EB
  242. // A' = A + 2^(n-e)*EA
  243. //
  244. // Let C' be an input with c trailing zero bits. C' can be written as
  245. //
  246. // C' = C*2^c
  247. //
  248. // Therefore we can compute the result by using distributivity and
  249. // commutativity.
  250. //
  251. // (B'*C' + A'*C') = [B + 2^(n-e)*EB] * C' + [A + 2^(n-e)*EA] * C' =
  252. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  253. // = (B'+A') * C' =
  254. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  255. // = [B + A + 2^(n-e)*EB + 2^(n-e)*EA] * C' =
  256. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C' =
  257. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C*2^c =
  258. // = (B + A) * C' + C*(EB + EA)*2^(n-e)*2^c =
  259. //
  260. // Let EC be the final error with EC = C*(EB + EA)
  261. //
  262. // = (B + A)*C' + EC*2^(n-e)*2^c =
  263. // = (B + A)*C' + EC*2^(n-(e-c))
  264. //
  265. // Since EC is multiplied by 2^(n-(e-c)) the resulting error contains c
  266. // less error bits than the input. c bits are shifted out to the left.
  267. // [qed]
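  //
  // Editor's note: a small numeric illustration of Proof(2). With n = 8 and
  // e = 2 error bits the error term is E*2^6; multiplying by C = 4 (c = 2
  // trailing zeros) turns it into E*2^8, which vanishes modulo 2^8, so the
  // result carries e - c = 0 error bits. decErrorMSBs(2) below models this.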
  268. if (C.getBitWidth() != A.getBitWidth()) {
  269. ErrorMSBs = (unsigned)-1;
  270. return *this;
  271. }
  272. // Multiplying by one is a no-op.
  273. if (C.isOneValue()) {
  274. return *this;
  275. }
  276. // Multiplying by zero removes the coefficient B and defines all bits.
  277. if (C.isNullValue()) {
  278. ErrorMSBs = 0;
  279. deleteB();
  280. }
  281. // See Proof(2): Trailing zero bits indicate a left shift. This removes
  282. // leading bits from the result even if they are undefined.
  283. decErrorMSBs(C.countTrailingZeros());
  284. A *= C;
  285. pushBOperation(Mul, C);
  286. return *this;
  287. }
  288. /// Apply a logical shift right on the polynomial
  289. Polynomial &lshr(const APInt &C) {
  290. // Theorem(1): (B + A + E*2^(n-e)) >> 1 => (B >> 1) + (A >> 1) + E'*2^(n-e')
  291. // where
  292. // e' = e + 1,
  293. // E is a e-bit number,
  294. // E' is a e'-bit number,
  295. // holds under the following precondition:
  296. // pre(1): A % 2 = 0
  297. // pre(2): e < n, (see Theorem(2) for the trivial case with e=n)
  298. // where >> expresses a logical shift to the right, with adding zeros.
  299. //
  300. // We need to show that for every E there is an E'
  301. //
  302. // B = b_h * 2^(n-1) + b_m * 2 + b_l
  303. // A = a_h * 2^(n-1) + a_m * 2 (pre(1))
  304. //
  305. // where a_h, b_h, b_l are single bits, and a_m, b_m are (n-2) bit numbers
  306. //
  307. // Let X = (B + A + E*2^(n-e)) >> 1
  308. // Let Y = (B >> 1) + (A >> 1) + E*2^(n-e) >> 1
  309. //
  310. // X = [B + A + E*2^(n-e)] >> 1 =
  311. // = [ b_h * 2^(n-1) + b_m * 2 + b_l +
  312. // + a_h * 2^(n-1) + a_m * 2 +
  313. // + E * 2^(n-e) ] >> 1 =
  314. //
  315. // The sum is built by putting the overflow of [a_m + b_m] into the term
  316. // 2^(n-1). As there are no more bits beyond 2^(n-1) the overflow within
  317. // this bit is discarded. This is expressed by % 2.
  318. //
  319. // The bit in position 0 cannot overflow into the term (b_m + a_m).
  320. //
  321. // = [ ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-1) +
  322. // + ((b_m + a_m) % 2^(n-2)) * 2 +
  323. // + b_l + E * 2^(n-e) ] >> 1 =
  324. //
  325. // The shift is computed by dividing the terms by 2 and by cutting off
  326. // b_l.
  327. //
  328. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  329. // + ((b_m + a_m) % 2^(n-2)) +
  330. // + E * 2^(n-(e+1)) =
  331. //
  332. // by the definition in the Theorem e+1 = e'
  333. //
  334. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  335. // + ((b_m + a_m) % 2^(n-2)) +
  336. // + E * 2^(n-e') =
  337. //
  338. // Compute Y by applying distributivity first
  339. //
  340. // Y = (B >> 1) + (A >> 1) + E*2^(n-e') =
  341. // = (b_h * 2^(n-1) + b_m * 2 + b_l) >> 1 +
  342. // + (a_h * 2^(n-1) + a_m * 2) >> 1 +
  343. // + E * 2^(n-e) >> 1 =
  344. //
  345. // Again, the shift is computed by dividing the terms by 2 and by cutting
  346. // off b_l.
  347. //
  348. // = b_h * 2^(n-2) + b_m +
  349. // + a_h * 2^(n-2) + a_m +
  350. // + E * 2^(n-(e+1)) =
  351. //
  352. // Again, the sum is built by putting the overflow of [a_m + b_m] into
  353. // the term 2^(n-1). But this time there is room for a second bit in the
  354. // term 2^(n-2); we add this bit to a new term and denote it o_h in a
  355. // second step.
  356. //
  357. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] >> 1) * 2^(n-1) +
  358. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  359. // + ((b_m + a_m) % 2^(n-2)) +
  360. // + E * 2^(n-(e+1)) =
  361. //
  362. // Let o_h = [b_h + a_h + (b_m + a_m) >> (n-2)] >> 1
  363. // Further replace e+1 by e'.
  364. //
  365. // = o_h * 2^(n-1) +
  366. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  367. // + ((b_m + a_m) % 2^(n-2)) +
  368. // + E * 2^(n-e') =
  369. //
  370. // Move o_h into the error term and construct E'. To ensure that there is
  371. // no 2^x with negative x, this step requires pre(2) (e < n).
  372. //
  373. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  374. // + ((b_m + a_m) % 2^(n-2)) +
  375. // + o_h * 2^(e'-1) * 2^(n-e') + | pre(2), move 2^(e'-1)
  376. // | out of the old exponent
  377. // + E * 2^(n-e') =
  378. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  379. // + ((b_m + a_m) % 2^(n-2)) +
  380. // + [o_h * 2^(e'-1) + E] * 2^(n-e') + | move 2^(e'-1) out of
  381. // | the old exponent
  382. //
  383. // Let E' = o_h * 2^(e'-1) + E
  384. //
  385. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  386. // + ((b_m + a_m) % 2^(n-2)) +
  387. // + E' * 2^(n-e')
  388. //
  389. // Because X and Y differ only in their error terms and E' can be
  390. // constructed as shown, the theorem holds.
  391. // [qed]
  392. //
  393. // For completeness, in the case e=n it is also required to show that
  394. // distributivity can be applied.
  395. //
  396. // In this case Theorem(1) transforms to (the pre-condition on A can also be
  397. // dropped)
  398. //
  399. // Theorem(2): (B + A + E) >> 1 => (B >> 1) + (A >> 1) + E'
  400. // where
  401. // A, B, E, E' are two's complement numbers with the same bit
  402. // width
  403. //
  404. // Let A + B + E = X
  405. // Let (B >> 1) + (A >> 1) = Y
  406. //
  407. // Therefore we need to show that for every X and Y there is an E' which
  408. // makes the equation
  409. //
  410. // X = Y + E'
  411. //
  412. // hold. This is trivially the case for E' = X - Y.
  413. //
  414. // [qed]
  415. //
  416. // Remark: Distributing lshr with an arbitrary number n can be expressed as
  417. // ((((B + A) lshr 1) lshr 1) ... ) {n times}.
  418. // This construction induces n additional error bits at the left.
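  //
  // Editor's note: a concrete instance matching the file-header example: for
  // P = %IDX + 2 with no error bits, lshr(1) yields (%IDX lshr 1) + 1 with
  // one error MSB (tag #1); what is lost is exactly the o_h overflow bit
  // derived above.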
  419. if (C.getBitWidth() != A.getBitWidth()) {
  420. ErrorMSBs = (unsigned)-1;
  421. return *this;
  422. }
  423. if (C.isNullValue())
  424. return *this;
  425. // Test if the result will be zero
  426. unsigned shiftAmt = C.getZExtValue();
  427. if (shiftAmt >= C.getBitWidth())
  428. return mul(APInt(C.getBitWidth(), 0));
  429. // The proof that shiftAmt LSBs are zero for at least one summand is only
  430. // possible for the constant summand A.
  431. //
  432. // If this can be proven add shiftAmt to the error counter
  433. // `ErrorMSBs`. Otherwise set all bits as undefined.
  434. if (A.countTrailingZeros() < shiftAmt)
  435. ErrorMSBs = A.getBitWidth();
  436. else
  437. incErrorMSBs(shiftAmt);
  438. // Apply the operation.
  439. pushBOperation(LShr, C);
  440. A = A.lshr(shiftAmt);
  441. return *this;
  442. }
  443. /// Apply a sign-extend or truncate operation on the polynomial.
  444. Polynomial &sextOrTrunc(unsigned n) {
  445. if (n < A.getBitWidth()) {
  446. // Truncate: Clearly undefined Bits on the MSB side are removed
  447. // if there are any.
  448. decErrorMSBs(A.getBitWidth() - n);
  449. A = A.trunc(n);
  450. pushBOperation(Trunc, APInt(sizeof(n) * 8, n));
  451. }
  452. if (n > A.getBitWidth()) {
  453. // Extend: Clearly extending first and adding later is different
  454. // from adding first and extending later in all extended bits.
  455. incErrorMSBs(n - A.getBitWidth());
  456. A = A.sext(n);
  457. pushBOperation(SExt, APInt(sizeof(n) * 8, n));
  458. }
  459. return *this;
  460. }
  461. /// Test if there is a coefficient B.
  462. bool isFirstOrder() const { return V != nullptr; }
  463. /// Test whether the coefficients B of two Polynomials are equal.
  464. bool isCompatibleTo(const Polynomial &o) const {
  465. // The polynomials use different bit widths.
  466. if (A.getBitWidth() != o.A.getBitWidth())
  467. return false;
  468. // If neither Polynomial has the Coefficient B.
  469. if (!isFirstOrder() && !o.isFirstOrder())
  470. return true;
  471. // The index variable is different.
  472. if (V != o.V)
  473. return false;
  474. // Check the operations.
  475. if (B.size() != o.B.size())
  476. return false;
  477. auto ob = o.B.begin();
  478. for (auto &b : B) {
  479. if (b != *ob)
  480. return false;
  481. ob++;
  482. }
  483. return true;
  484. }
  485. /// Subtract two polynomials, return an undefined polynomial if
  486. /// subtraction is not possible.
  487. Polynomial operator-(const Polynomial &o) const {
  488. // Return an undefined polynomial if incompatible.
  489. if (!isCompatibleTo(o))
  490. return Polynomial();
  491. // If the polynomials are compatible (meaning they have the same
  492. // coefficient on B), B is eliminated. Thus a polynomial solely
  493. // containing A is returned
  494. return Polynomial(A - o.A, std::max(ErrorMSBs, o.ErrorMSBs));
  495. }
  496. /// Subtract a constant from a polynomial,
  497. Polynomial operator-(uint64_t C) const {
  498. Polynomial Result(*this);
  499. Result.A -= C;
  500. return Result;
  501. }
  502. /// Add a constant to a polynomial,
  503. Polynomial operator+(uint64_t C) const {
  504. Polynomial Result(*this);
  505. Result.A += C;
  506. return Result;
  507. }
  508. /// Returns true if it can be proven that two Polynomials are equal.
  509. bool isProvenEqualTo(const Polynomial &o) {
  510. // Subtract both polynomials and test if it is fully defined and zero.
  511. Polynomial r = *this - o;
  512. return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isNullValue());
  513. }
  514. /// Print the polynomial into a stream.
  515. void print(raw_ostream &OS) const {
  516. OS << "[{#ErrBits:" << ErrorMSBs << "} ";
  517. if (V) {
  518. for (auto b : B)
  519. OS << "(";
  520. OS << "(" << *V << ") ";
  521. for (auto b : B) {
  522. switch (b.first) {
  523. case LShr:
  524. OS << "LShr ";
  525. break;
  526. case Mul:
  527. OS << "Mul ";
  528. break;
  529. case SExt:
  530. OS << "SExt ";
  531. break;
  532. case Trunc:
  533. OS << "Trunc ";
  534. break;
  535. }
  536. OS << b.second << ") ";
  537. }
  538. }
  539. OS << "+ " << A << "]";
  540. }
  541. private:
  542. void deleteB() {
  543. V = nullptr;
  544. B.clear();
  545. }
  546. void pushBOperation(const BOps Op, const APInt &C) {
  547. if (isFirstOrder()) {
  548. B.push_back(std::make_pair(Op, C));
  549. return;
  550. }
  551. }
  552. };
  553. static raw_ostream &operator<<(raw_ostream &OS, const Polynomial &P) {
  554. P.print(OS);
  555. return OS;
  556. }
  557. /// VectorInfo stores the following abstract information for each vector
  558. /// element:
  559. ///
  560. /// 1) the memory address loaded into the element, as a Polynomial,
  561. /// 2) a set of load instructions necessary to construct the vector,
  562. /// 3) a set of all other instructions that are necessary to create the vector, and
  563. /// 4) a pointer value that can be used as a relative base for all elements.
  564. struct VectorInfo {
  565. private:
  566. VectorInfo(const VectorInfo &c) : VTy(c.VTy) {
  567. llvm_unreachable(
  568. "Copying VectorInfo is neither implemented nor necessary,");
  569. }
  570. public:
  571. /// Information of a Vector Element
  572. struct ElementInfo {
  573. /// Offset Polynomial.
  574. Polynomial Ofs;
  575. /// The load instruction used to load the entry. LI is null if the pointer
  576. /// of the load instruction does not point to the entry.
  577. LoadInst *LI;
  578. ElementInfo(Polynomial Offset = Polynomial(), LoadInst *LI = nullptr)
  579. : Ofs(Offset), LI(LI) {}
  580. };
  581. /// Basic-block the load instructions are within
  582. BasicBlock *BB;
  584. /// Pointer value of all participating load instructions
  584. Value *PV;
  585. /// Participating load instructions
  586. std::set<LoadInst *> LIs;
  587. /// Participating instructions
  588. std::set<Instruction *> Is;
  589. /// Final shuffle-vector instruction
  590. ShuffleVectorInst *SVI;
  591. /// Information of the offset for each vector element
  592. ElementInfo *EI;
  593. /// Vector Type
  594. VectorType *const VTy;
  595. VectorInfo(VectorType *VTy)
  596. : BB(nullptr), PV(nullptr), LIs(), Is(), SVI(nullptr), VTy(VTy) {
  597. EI = new ElementInfo[VTy->getNumElements()];
  598. }
  599. virtual ~VectorInfo() { delete[] EI; }
  600. unsigned getDimension() const { return VTy->getNumElements(); }
  601. /// Test if the VectorInfo can be part of an interleaved load with the
  602. /// specified factor.
  603. ///
  604. /// \param Factor of the interleave
  605. /// \param DL The target's DataLayout
  606. ///
  607. /// \returns true if this is possible and false if not
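  ///
  /// Editor's note, a worked example: for a <4 x float> candidate (element
  /// size 4) and Factor = 2, the element offsets must be provably
  /// o, o+8, o+16 and o+24 for some common offset o.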
  608. bool isInterleaved(unsigned Factor, const DataLayout &DL) const {
  609. unsigned Size = DL.getTypeAllocSize(VTy->getElementType());
  610. for (unsigned i = 1; i < getDimension(); i++) {
  611. if (!EI[i].Ofs.isProvenEqualTo(EI[0].Ofs + i * Factor * Size)) {
  612. return false;
  613. }
  614. }
  615. return true;
  616. }
  617. /// Recursively computes the vector information stored in V.
  618. ///
  619. /// This function delegates the work to specialized implementations
  620. ///
  621. /// \param V Value to operate on
  622. /// \param Result Result of the computation
  623. ///
  624. /// \returns false if no sensible information can be gathered.
  625. static bool compute(Value *V, VectorInfo &Result, const DataLayout &DL) {
  626. ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  627. if (SVI)
  628. return computeFromSVI(SVI, Result, DL);
  629. LoadInst *LI = dyn_cast<LoadInst>(V);
  630. if (LI)
  631. return computeFromLI(LI, Result, DL);
  632. BitCastInst *BCI = dyn_cast<BitCastInst>(V);
  633. if (BCI)
  634. return computeFromBCI(BCI, Result, DL);
  635. return false;
  636. }
  637. /// BitCastInst specialization to compute the vector information.
  638. ///
  639. /// \param BCI BitCastInst to operate on
  640. /// \param Result Result of the computation
  641. ///
  642. /// \returns false if no sensible information can be gathered.
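  ///
  /// Editor's note, a worked example: a bitcast from <2 x i64> to <4 x i32>
  /// gives Factor = 2 and NewSize = 4; an old element at offset o yields two
  /// new elements at offsets o and o+4, and only the first inherits the
  /// LoadInst.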
  643. static bool computeFromBCI(BitCastInst *BCI, VectorInfo &Result,
  644. const DataLayout &DL) {
  645. Instruction *Op = dyn_cast<Instruction>(BCI->getOperand(0));
  646. if (!Op)
  647. return false;
  648. VectorType *VTy = dyn_cast<VectorType>(Op->getType());
  649. if (!VTy)
  650. return false;
  651. // We can only cast from a vector of wider elements to one of narrower elements
  652. if (Result.VTy->getNumElements() % VTy->getNumElements())
  653. return false;
  654. unsigned Factor = Result.VTy->getNumElements() / VTy->getNumElements();
  655. unsigned NewSize = DL.getTypeAllocSize(Result.VTy->getElementType());
  656. unsigned OldSize = DL.getTypeAllocSize(VTy->getElementType());
  657. if (NewSize * Factor != OldSize)
  658. return false;
  659. VectorInfo Old(VTy);
  660. if (!compute(Op, Old, DL))
  661. return false;
  662. for (unsigned i = 0; i < Result.VTy->getNumElements(); i += Factor) {
  663. for (unsigned j = 0; j < Factor; j++) {
  664. Result.EI[i + j] =
  665. ElementInfo(Old.EI[i / Factor].Ofs + j * NewSize,
  666. j == 0 ? Old.EI[i / Factor].LI : nullptr);
  667. }
  668. }
  669. Result.BB = Old.BB;
  670. Result.PV = Old.PV;
  671. Result.LIs.insert(Old.LIs.begin(), Old.LIs.end());
  672. Result.Is.insert(Old.Is.begin(), Old.Is.end());
  673. Result.Is.insert(BCI);
  674. Result.SVI = nullptr;
  675. return true;
  676. }
  677. /// ShuffleVectorInst specialization to compute vector information.
  678. ///
  679. /// \param SVI ShuffleVectorInst to operate on
  680. /// \param Result Result of the computation
  681. ///
  682. /// Compute the left and the right side vector information and merge them by
  683. /// applying the shuffle operation. This function also ensures that the left
  684. /// and right side have compatible loads. This means that all loads are
  685. /// within the same basic block and are based on the same pointer.
  686. ///
  687. /// \returns false if no sensible information can be gathered.
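  ///
  /// Editor's note, a worked example: with mask <0, 4, 1, 5> on two
  /// <4 x ...> operands, result elements 0 and 2 copy LHS.EI[0] and
  /// LHS.EI[1], elements 1 and 3 copy RHS.EI[0] and RHS.EI[1]; a negative
  /// (undef) mask entry yields an empty ElementInfo.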
  688. static bool computeFromSVI(ShuffleVectorInst *SVI, VectorInfo &Result,
  689. const DataLayout &DL) {
  690. VectorType *ArgTy = dyn_cast<VectorType>(SVI->getOperand(0)->getType());
  691. assert(ArgTy && "ShuffleVector Operand is not a VectorType");
  692. // Compute the left hand vector information.
  693. VectorInfo LHS(ArgTy);
  694. if (!compute(SVI->getOperand(0), LHS, DL))
  695. LHS.BB = nullptr;
  696. // Compute the right hand vector information.
  697. VectorInfo RHS(ArgTy);
  698. if (!compute(SVI->getOperand(1), RHS, DL))
  699. RHS.BB = nullptr;
  700. // Neither operand produced sensible results?
  701. if (!LHS.BB && !RHS.BB)
  702. return false;
  703. // Only RHS produced sensible results?
  704. else if (!LHS.BB) {
  705. Result.BB = RHS.BB;
  706. Result.PV = RHS.PV;
  707. }
  708. // Only LHS produced sensible results?
  709. else if (!RHS.BB) {
  710. Result.BB = LHS.BB;
  711. Result.PV = LHS.PV;
  712. }
  713. // Both operands produced sensible results?
  714. else if ((LHS.BB == RHS.BB) && (LHS.PV == RHS.PV)) {
  715. Result.BB = LHS.BB;
  716. Result.PV = LHS.PV;
  717. }
  718. // Both operands produced sensible results but they are incompatible.
  719. else {
  720. return false;
  721. }
  722. // Merge and apply the operation on the offset information.
  723. if (LHS.BB) {
  724. Result.LIs.insert(LHS.LIs.begin(), LHS.LIs.end());
  725. Result.Is.insert(LHS.Is.begin(), LHS.Is.end());
  726. }
  727. if (RHS.BB) {
  728. Result.LIs.insert(RHS.LIs.begin(), RHS.LIs.end());
  729. Result.Is.insert(RHS.Is.begin(), RHS.Is.end());
  730. }
  731. Result.Is.insert(SVI);
  732. Result.SVI = SVI;
  733. int j = 0;
  734. for (int i : SVI->getShuffleMask()) {
  735. assert((i < 2 * (signed)ArgTy->getNumElements()) &&
  736. "Invalid ShuffleVectorInst (index out of bounds)");
  737. if (i < 0)
  738. Result.EI[j] = ElementInfo();
  739. else if (i < (signed)ArgTy->getNumElements()) {
  740. if (LHS.BB)
  741. Result.EI[j] = LHS.EI[i];
  742. else
  743. Result.EI[j] = ElementInfo();
  744. } else {
  745. if (RHS.BB)
  746. Result.EI[j] = RHS.EI[i - ArgTy->getNumElements()];
  747. else
  748. Result.EI[j] = ElementInfo();
  749. }
  750. j++;
  751. }
  752. return true;
  753. }
  754. /// LoadInst specialization to compute vector information.
  755. ///
  756. /// This function also acts as the abort condition for the recursion.
  757. ///
  758. /// \param LI LoadInst to operate on
  759. /// \param Result Result of the computation
  760. ///
  761. /// \returns false if no sensible information can be gathered.
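  ///
  /// Editor's note, a worked example: for `load <4 x float>, <4 x float>* %p`
  /// the element offsets become Offset+0, +4, +8 and +12 relative to the base
  /// pointer computed from %p, and only element 0 records the LoadInst.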
  762. static bool computeFromLI(LoadInst *LI, VectorInfo &Result,
  763. const DataLayout &DL) {
  764. Value *BasePtr;
  765. Polynomial Offset;
  766. if (LI->isVolatile())
  767. return false;
  768. if (LI->isAtomic())
  769. return false;
  770. // Get the base polynomial
  771. computePolynomialFromPointer(*LI->getPointerOperand(), Offset, BasePtr, DL);
  772. Result.BB = LI->getParent();
  773. Result.PV = BasePtr;
  774. Result.LIs.insert(LI);
  775. Result.Is.insert(LI);
  776. for (unsigned i = 0; i < Result.getDimension(); i++) {
  777. Value *Idx[2] = {
  778. ConstantInt::get(Type::getInt32Ty(LI->getContext()), 0),
  779. ConstantInt::get(Type::getInt32Ty(LI->getContext()), i),
  780. };
  781. int64_t Ofs = DL.getIndexedOffsetInType(Result.VTy, makeArrayRef(Idx, 2));
  782. Result.EI[i] = ElementInfo(Offset + Ofs, i == 0 ? LI : nullptr);
  783. }
  784. return true;
  785. }
  786. /// Recursively compute polynomial of a value.
  787. ///
  788. /// \param BO Input binary operation
  789. /// \param Result Result polynomial
  790. static void computePolynomialBinOp(BinaryOperator &BO, Polynomial &Result) {
  791. Value *LHS = BO.getOperand(0);
  792. Value *RHS = BO.getOperand(1);
  793. // Find the RHS Constant if any
  794. ConstantInt *C = dyn_cast<ConstantInt>(RHS);
  795. if ((!C) && BO.isCommutative()) {
  796. C = dyn_cast<ConstantInt>(LHS);
  797. if (C)
  798. std::swap(LHS, RHS);
  799. }
  800. switch (BO.getOpcode()) {
  801. case Instruction::Add:
  802. if (!C)
  803. break;
  804. computePolynomial(*LHS, Result);
  805. Result.add(C->getValue());
  806. return;
  807. case Instruction::LShr:
  808. if (!C)
  809. break;
  810. computePolynomial(*LHS, Result);
  811. Result.lshr(C->getValue());
  812. return;
  813. default:
  814. break;
  815. }
  816. Result = Polynomial(&BO);
  817. }
  818. /// Recursively compute polynomial of a value
  819. ///
  820. /// \param V input value
  821. /// \param Result result polynomial
  822. static void computePolynomial(Value &V, Polynomial &Result) {
  823. if (isa<BinaryOperator>(&V))
  824. computePolynomialBinOp(*dyn_cast<BinaryOperator>(&V), Result);
  825. else
  826. Result = Polynomial(&V);
  827. }
  828. /// Compute the Polynomial representation of a pointer value.
  829. ///
  830. /// \param Ptr input pointer value
  831. /// \param Result result polynomial
  832. /// \param BasePtr pointer the polynomial is based on
  833. /// \param DL Datalayout of the target machine
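  ///
  /// Editor's note, a worked example: for
  /// `getelementptr inbounds <4 x float>, <4 x float>* %p, i64 %i` the result
  /// is the polynomial %i * 16 with BasePtr = %p, while a GEP whose offset is
  /// fully constant is folded by accumulateConstantOffset into a constant
  /// polynomial.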
  834. static void computePolynomialFromPointer(Value &Ptr, Polynomial &Result,
  835. Value *&BasePtr,
  836. const DataLayout &DL) {
  837. // Not a pointer type? Return an undefined polynomial
  838. PointerType *PtrTy = dyn_cast<PointerType>(Ptr.getType());
  839. if (!PtrTy) {
  840. Result = Polynomial();
  841. BasePtr = nullptr;
  return;
  842. }
  843. unsigned PointerBits =
  844. DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace());
  845. /// Skip pointer casts. Return Zero polynomial otherwise
  846. if (isa<CastInst>(&Ptr)) {
  847. CastInst &CI = *cast<CastInst>(&Ptr);
  848. switch (CI.getOpcode()) {
  849. case Instruction::BitCast:
  850. computePolynomialFromPointer(*CI.getOperand(0), Result, BasePtr, DL);
  851. break;
  852. default:
  853. BasePtr = &Ptr;
  854. Result = Polynomial(PointerBits, 0);
  855. break;
  856. }
  857. }
  858. /// Resolve GetElementPtrInst.
  859. else if (isa<GetElementPtrInst>(&Ptr)) {
  860. GetElementPtrInst &GEP = *cast<GetElementPtrInst>(&Ptr);
  861. APInt BaseOffset(PointerBits, 0);
  862. // Check if we can compute the Offset with accumulateConstantOffset
  863. if (GEP.accumulateConstantOffset(DL, BaseOffset)) {
  864. Result = Polynomial(BaseOffset);
  865. BasePtr = GEP.getPointerOperand();
  866. return;
  867. } else {
  868. // Otherwise we allow that the last index operand of the GEP is
  869. // non-constant.
  870. unsigned idxOperand, e;
  871. SmallVector<Value *, 4> Indices;
  872. for (idxOperand = 1, e = GEP.getNumOperands(); idxOperand < e;
  873. idxOperand++) {
  874. ConstantInt *IDX = dyn_cast<ConstantInt>(GEP.getOperand(idxOperand));
  875. if (!IDX)
  876. break;
  877. Indices.push_back(IDX);
  878. }
  879. // The non-constant index must also be the last operand.
  880. if (idxOperand + 1 != e) {
  881. Result = Polynomial();
  882. BasePtr = nullptr;
  883. return;
  884. }
  885. // Compute the polynomial of the index operand.
  886. computePolynomial(*GEP.getOperand(idxOperand), Result);
  887. // Compute base offset from zero based index, excluding the last
  888. // variable operand.
  889. BaseOffset =
  890. DL.getIndexedOffsetInType(GEP.getSourceElementType(), Indices);
  891. // Apply the operations of GEP to the polynomial.
  892. unsigned ResultSize = DL.getTypeAllocSize(GEP.getResultElementType());
  893. Result.sextOrTrunc(PointerBits);
  894. Result.mul(APInt(PointerBits, ResultSize));
  895. Result.add(BaseOffset);
  896. BasePtr = GEP.getPointerOperand();
  897. }
  898. }
  899. // All other instructions are handled by using the value as base pointer and
  900. // a zero polynomial.
  901. else {
  902. BasePtr = &Ptr;
  903. Result = Polynomial(DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace()), 0);
  904. }
  905. }
  906. #ifndef NDEBUG
  907. void print(raw_ostream &OS) const {
  908. if (PV)
  909. OS << *PV;
  910. else
  911. OS << "(none)";
  912. OS << " + ";
  913. for (unsigned i = 0; i < getDimension(); i++)
  914. OS << ((i == 0) ? "[" : ", ") << EI[i].Ofs;
  915. OS << "]";
  916. }
  917. #endif
  918. };
  919. #ifndef NDEBUG
  920. static raw_ostream &operator<<(raw_ostream &OS, const VectorInfo &S) {
  921. S.print(OS);
  922. return OS;
  923. }
  924. #endif
  925. } // anonymous namespace
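// Editor's note, a worked example: with Factor = 2 and <4 x float> candidates
// (element size 4), findPattern needs a second candidate of the same type,
// basic block and base pointer whose element-0 offset is provably the first
// candidate's element-0 offset plus 4.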
  926. bool InterleavedLoadCombineImpl::findPattern(
  927. std::list<VectorInfo> &Candidates, std::list<VectorInfo> &InterleavedLoad,
  928. unsigned Factor, const DataLayout &DL) {
  929. for (auto C0 = Candidates.begin(), E0 = Candidates.end(); C0 != E0; ++C0) {
  930. unsigned i;
  931. // Try to find an interleaved load using the front of Worklist as first line
  932. unsigned Size = DL.getTypeAllocSize(C0->VTy->getElementType());
  933. // List containing iterators pointing to the VectorInfos of the candidates
  934. std::vector<std::list<VectorInfo>::iterator> Res(Factor, Candidates.end());
  935. for (auto C = Candidates.begin(), E = Candidates.end(); C != E; C++) {
  936. if (C->VTy != C0->VTy)
  937. continue;
  938. if (C->BB != C0->BB)
  939. continue;
  940. if (C->PV != C0->PV)
  941. continue;
  942. // Check if the current candidate matches any of the Factor - 1 remaining lines
  943. for (i = 1; i < Factor; i++) {
  944. if (C->EI[0].Ofs.isProvenEqualTo(C0->EI[0].Ofs + i * Size)) {
  945. Res[i] = C;
  946. }
  947. }
  948. for (i = 1; i < Factor; i++) {
  949. if (Res[i] == Candidates.end())
  950. break;
  951. }
  952. if (i == Factor) {
  953. Res[0] = C0;
  954. break;
  955. }
  956. }
  957. if (Res[0] != Candidates.end()) {
  958. // Move the result into the output
  959. for (unsigned i = 0; i < Factor; i++) {
  960. InterleavedLoad.splice(InterleavedLoad.end(), Candidates, Res[i]);
  961. }
  962. return true;
  963. }
  964. }
  965. return false;
  966. }
  967. LoadInst *
  968. InterleavedLoadCombineImpl::findFirstLoad(const std::set<LoadInst *> &LIs) {
  969. assert(!LIs.empty() && "No load instructions given.");
  970. // All LIs are within the same BB. Select the first for a reference.
  971. BasicBlock *BB = (*LIs.begin())->getParent();
  972. BasicBlock::iterator FLI =
  973. std::find_if(BB->begin(), BB->end(), [&LIs](Instruction &I) -> bool {
  974. return is_contained(LIs, &I);
  975. });
  976. assert(FLI != BB->end());
  977. return cast<LoadInst>(FLI);
  978. }
  979. bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
  980. OptimizationRemarkEmitter &ORE) {
  981. LLVM_DEBUG(dbgs() << "Checking interleaved load\n");
  982. for (auto &VI : InterleavedLoad)
  983. LLVM_DEBUG(dbgs() << VI << "\n");
  984. // The insertion point is the LoadInst which loads the first values. The
  985. // following tests are used to prove that the combined load can be inserted
  986. // just before InsertionPoint.
  987. LoadInst *InsertionPoint = InterleavedLoad.front().EI[0].LI;
  988. // Test if the offset is computed
  989. if (!InsertionPoint)
  990. return false;
  991. std::set<LoadInst *> LIs;
  992. std::set<Instruction *> Is;
  993. std::set<Instruction *> SVIs;
  994. unsigned InterleavedCost;
  995. unsigned InstructionCost = 0;
  996. // Get the interleave factor
  997. unsigned Factor = InterleavedLoad.size();
  998. // Merge all input sets used in analysis
  999. for (auto &VI : InterleavedLoad) {
  1000. // Generate a set of all load instructions to be combined
  1001. LIs.insert(VI.LIs.begin(), VI.LIs.end());
  1002. // Generate a set of all instructions taking part in load
  1003. // interleaved. This list excludes the instructions necessary for the
  1004. // polynomial construction.
  1005. Is.insert(VI.Is.begin(), VI.Is.end());
  1006. // Generate the set of the final ShuffleVectorInst.
  1007. SVIs.insert(VI.SVI);
  1008. }
  1009. // There is nothing to combine.
  1010. if (LIs.size() < 2)
  1011. return false;
  1012. // Test if all participating instructions will be dead after the
  1013. // transformation. If intermediate results are used, no performance gain can
  1014. // be expected. Also sum the cost of the instructions being left dead.
  1015. for (auto &I : Is) {
  1016. // Compute the old cost
  1017. InstructionCost +=
  1018. TTI.getInstructionCost(I, TargetTransformInfo::TCK_Latency);
  1019. // The final SVIs are allowed not to be dead, all uses will be replaced
  1020. if (SVIs.find(I) != SVIs.end())
  1021. continue;
  1022. // If there are users outside the set to be eliminated, we abort the
  1023. // transformation. No gain can be expected.
  1024. for (const auto &U : I->users()) {
  1025. if (Is.find(dyn_cast<Instruction>(U)) == Is.end())
  1026. return false;
  1027. }
  1028. }
  1029. // We know that all LoadInst are within the same BB. This guarantees that
  1030. // either everything or nothing is loaded.
  1031. LoadInst *First = findFirstLoad(LIs);
  1032. // To ensure that the loads can be combined, iterate over all loads and test
  1033. // that the corresponding defining access dominates the first LI. This guarantees
  1034. // that there are no aliasing stores in between the loads.
  1035. auto FMA = MSSA.getMemoryAccess(First);
  1036. for (auto LI : LIs) {
  1037. auto MADef = MSSA.getMemoryAccess(LI)->getDefiningAccess();
  1038. if (!MSSA.dominates(MADef, FMA))
  1039. return false;
  1040. }
  1041. assert(!LIs.empty() && "There are no LoadInst to combine");
  1042. // It is necessary that insertion point dominates all final ShuffleVectorInst.
  1043. for (auto &VI : InterleavedLoad) {
  1044. if (!DT.dominates(InsertionPoint, VI.SVI))
  1045. return false;
  1046. }
  1047. // All checks are done. Add instructions detectable by the InterleavedAccessPass.
  1048. // The old instructions are left dead.
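  // Editor's note, an example of the masks built below: with Factor = 3 and
  // ElementsPerSVI = 4, the three replacement shuffles use the strided masks
  // {0,3,6,9}, {1,4,7,10} and {2,5,8,11} on the single wide load.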
  1049. IRBuilder<> Builder(InsertionPoint);
  1050. Type *ETy = InterleavedLoad.front().SVI->getType()->getElementType();
  1051. unsigned ElementsPerSVI =
  1052. InterleavedLoad.front().SVI->getType()->getNumElements();
  1053. VectorType *ILTy = VectorType::get(ETy, Factor * ElementsPerSVI);
  1054. SmallVector<unsigned, 4> Indices;
  1055. for (unsigned i = 0; i < Factor; i++)
  1056. Indices.push_back(i);
  1057. InterleavedCost = TTI.getInterleavedMemoryOpCost(
  1058. Instruction::Load, ILTy, Factor, Indices, InsertionPoint->getAlignment(),
  1059. InsertionPoint->getPointerAddressSpace());
  1060. if (InterleavedCost >= InstructionCost) {
  1061. return false;
  1062. }
  1063. // Create a pointer cast for the wide load.
  1064. auto CI = Builder.CreatePointerCast(InsertionPoint->getOperand(0),
  1065. ILTy->getPointerTo(),
  1066. "interleaved.wide.ptrcast");
  1067. // Create the wide load and update the MemorySSA.
  1068. auto LI = Builder.CreateAlignedLoad(CI, InsertionPoint->getAlignment(),
  1069. "interleaved.wide.load");
  1070. auto MSSAU = MemorySSAUpdater(&MSSA);
  1071. MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
  1072. LI, nullptr, MSSA.getMemoryAccess(InsertionPoint)));
  1073. MSSAU.insertUse(MSSALoad);
  1074. // Create the final SVIs and replace all uses.
  1075. int i = 0;
  1076. for (auto &VI : InterleavedLoad) {
  1077. SmallVector<uint32_t, 4> Mask;
  1078. for (unsigned j = 0; j < ElementsPerSVI; j++)
  1079. Mask.push_back(i + j * Factor);
  1080. Builder.SetInsertPoint(VI.SVI);
  1081. auto SVI = Builder.CreateShuffleVector(LI, UndefValue::get(LI->getType()),
  1082. Mask, "interleaved.shuffle");
  1083. VI.SVI->replaceAllUsesWith(SVI);
  1084. i++;
  1085. }
  1086. NumInterleavedLoadCombine++;
  1087. ORE.emit([&]() {
  1088. return OptimizationRemark(DEBUG_TYPE, "Combined Interleaved Load", LI)
  1089. << "Load interleaved combined with factor "
  1090. << ore::NV("Factor", Factor);
  1091. });
  1092. return true;
  1093. }
  1094. bool InterleavedLoadCombineImpl::run() {
  1095. OptimizationRemarkEmitter ORE(&F);
  1096. bool changed = false;
  1097. unsigned MaxFactor = TLI.getMaxSupportedInterleaveFactor();
  1098. auto &DL = F.getParent()->getDataLayout();
  1099. // Start with the highest factor to avoid combining and recombining.
  1100. for (unsigned Factor = MaxFactor; Factor >= 2; Factor--) {
  1101. std::list<VectorInfo> Candidates;
  1102. for (BasicBlock &BB : F) {
  1103. for (Instruction &I : BB) {
  1104. if (auto SVI = dyn_cast<ShuffleVectorInst>(&I)) {
  1105. Candidates.emplace_back(SVI->getType());
  1106. if (!VectorInfo::computeFromSVI(SVI, Candidates.back(), DL)) {
  1107. Candidates.pop_back();
  1108. continue;
  1109. }
  1110. if (!Candidates.back().isInterleaved(Factor, DL)) {
  1111. Candidates.pop_back();
  1112. }
  1113. }
  1114. }
  1115. }
  1116. std::list<VectorInfo> InterleavedLoad;
  1117. while (findPattern(Candidates, InterleavedLoad, Factor, DL)) {
  1118. if (combine(InterleavedLoad, ORE)) {
  1119. changed = true;
  1120. } else {
  1121. // Remove the first element of the Interleaved Load but put the others
  1122. // back on the list and continue searching
  1123. Candidates.splice(Candidates.begin(), InterleavedLoad,
  1124. std::next(InterleavedLoad.begin()),
  1125. InterleavedLoad.end());
  1126. }
  1127. InterleavedLoad.clear();
  1128. }
  1129. }
  1130. return changed;
  1131. }
  1132. namespace {
  1133. /// This pass combines interleaved loads into a pattern detectable by
  1134. /// InterleavedAccessPass.
  1135. struct InterleavedLoadCombine : public FunctionPass {
  1136. static char ID;
  1137. InterleavedLoadCombine() : FunctionPass(ID) {
  1138. initializeInterleavedLoadCombinePass(*PassRegistry::getPassRegistry());
  1139. }
  1140. StringRef getPassName() const override {
  1141. return "Interleaved Load Combine Pass";
  1142. }
  1143. bool runOnFunction(Function &F) override {
  1144. if (DisableInterleavedLoadCombine)
  1145. return false;
  1146. auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  1147. if (!TPC)
  1148. return false;
  1149. LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName()
  1150. << "\n");
  1151. return InterleavedLoadCombineImpl(
  1152. F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
  1153. getAnalysis<MemorySSAWrapperPass>().getMSSA(),
  1154. TPC->getTM<TargetMachine>())
  1155. .run();
  1156. }
  1157. void getAnalysisUsage(AnalysisUsage &AU) const override {
  1158. AU.addRequired<MemorySSAWrapperPass>();
  1159. AU.addRequired<DominatorTreeWrapperPass>();
  1160. FunctionPass::getAnalysisUsage(AU);
  1161. }
  1162. private:
  1163. };
  1164. } // anonymous namespace
  1165. char InterleavedLoadCombine::ID = 0;
  1166. INITIALIZE_PASS_BEGIN(
  1167. InterleavedLoadCombine, DEBUG_TYPE,
  1168. "Combine interleaved loads into wide loads and shufflevector instructions",
  1169. false, false)
  1170. INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
  1171. INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
  1172. INITIALIZE_PASS_END(
  1173. InterleavedLoadCombine, DEBUG_TYPE,
  1174. "Combine interleaved loads into wide loads and shufflevector instructions",
  1175. false, false)
  1176. FunctionPass *
  1177. llvm::createInterleavedLoadCombinePass() {
  1178. auto P = new InterleavedLoadCombine();
  1179. return P;
  1180. }