InterleavedLoadCombinePass.cpp 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359
  1. //===- InterleavedLoadCombine.cpp - Combine Interleaved Loads ---*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // \file
  10. //
// This file defines the interleaved-load-combine pass. The pass searches for
// ShuffleVectorInst instructions that execute interleaving loads. If a matching
// pattern is found, it adds a combined load and further instructions in a
// pattern that is detectable by InterleavedAccessPass. The old instructions are
// left dead to be removed later. The pass is specifically designed to be
// executed just before InterleavedAccessPass to find any left-over instances
// that are not detected within former passes.
  18. //
  19. //===----------------------------------------------------------------------===//
  20. #include "llvm/ADT/Statistic.h"
  21. #include "llvm/Analysis/MemoryLocation.h"
  22. #include "llvm/Analysis/MemorySSA.h"
  23. #include "llvm/Analysis/MemorySSAUpdater.h"
  24. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  25. #include "llvm/Analysis/TargetTransformInfo.h"
  26. #include "llvm/CodeGen/Passes.h"
  27. #include "llvm/CodeGen/TargetLowering.h"
  28. #include "llvm/CodeGen/TargetPassConfig.h"
  29. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  30. #include "llvm/IR/DataLayout.h"
  31. #include "llvm/IR/Dominators.h"
  32. #include "llvm/IR/Function.h"
  33. #include "llvm/IR/Instructions.h"
  34. #include "llvm/IR/LegacyPassManager.h"
  35. #include "llvm/IR/Module.h"
  36. #include "llvm/Pass.h"
  37. #include "llvm/Support/Debug.h"
  38. #include "llvm/Support/ErrorHandling.h"
  39. #include "llvm/Support/raw_ostream.h"
  40. #include "llvm/Target/TargetMachine.h"
  41. #include <algorithm>
  42. #include <cassert>
  43. #include <list>
  44. using namespace llvm;
  45. #define DEBUG_TYPE "interleaved-load-combine"
  46. namespace {
/// Statistic counter: number of interleaved-load groups that were combined.
STATISTIC(NumInterleavedLoadCombine, "Number of combined loads");

/// Command-line option to disable the pass (off by default, hidden).
static cl::opt<bool> DisableInterleavedLoadCombine(
    "disable-" DEBUG_TYPE, cl::init(false), cl::Hidden,
    cl::desc("Disable combining of interleaved loads"));

// Forward declaration; the per-shuffle analysis result, defined below.
struct VectorInfo;
/// Implementation of the interleaved-load-combine transformation for a single
/// function. Member function bodies are defined elsewhere in this file.
struct InterleavedLoadCombineImpl {
public:
  InterleavedLoadCombineImpl(Function &F, DominatorTree &DT, MemorySSA &MSSA,
                             TargetMachine &TM)
      : F(F), DT(DT), MSSA(MSSA),
        TLI(*TM.getSubtargetImpl(F)->getTargetLowering()),
        TTI(TM.getTargetTransformInfo(F)) {}

  /// Scan the function for interleaved load candidates and execute the
  /// replacement if applicable.
  bool run();

private:
  /// Function this pass is working on
  Function &F;

  /// Dominator Tree Analysis
  DominatorTree &DT;

  /// Memory Alias Analyses
  MemorySSA &MSSA;

  /// Target Lowering Information
  const TargetLowering &TLI;

  /// Target Transform Information (stored by value; obtained from TM above)
  const TargetTransformInfo TTI;

  /// Find the instruction in sets LIs that dominates all others, return nullptr
  /// if there is none.
  LoadInst *findFirstLoad(const std::set<LoadInst *> &LIs);

  /// Replace interleaved load candidates. It does additional
  /// analyses if this makes sense. Returns true on success and false
  /// if nothing has been changed.
  bool combine(std::list<VectorInfo> &InterleavedLoad,
               OptimizationRemarkEmitter &ORE);

  /// Given a set of VectorInfo containing candidates for a given interleave
  /// factor, find a set that represents a 'factor' interleaved load.
  bool findPattern(std::list<VectorInfo> &Candidates,
                   std::list<VectorInfo> &InterleavedLoad, unsigned Factor,
                   const DataLayout &DL);
}; // InterleavedLoadCombine
  89. /// First Order Polynomial on an n-Bit Integer Value
  90. ///
  91. /// Polynomial(Value) = Value * B + A + E*2^(n-e)
  92. ///
  93. /// A and B are the coefficients. E*2^(n-e) is an error within 'e' most
  94. /// significant bits. It is introduced if an exact computation cannot be proven
// (e.g. division by 2).
  96. ///
// As part of this optimization multiple loads will be combined. It is
// necessary to prove that loads are within some relative offset to each other.
// This class is used to prove relative offsets of values loaded from memory.
  100. ///
  101. /// Representing an integer in this form is sound since addition in two's
  102. /// complement is associative (trivial) and multiplication distributes over the
  103. /// addition (see Proof(1) in Polynomial::mul). Further, both operations
  104. /// commute.
  105. //
  106. // Example:
  107. // declare @fn(i64 %IDX, <4 x float>* %PTR) {
  108. // %Pa1 = add i64 %IDX, 2
  109. // %Pa2 = lshr i64 %Pa1, 1
  110. // %Pa3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pa2
  111. // %Va = load <4 x float>, <4 x float>* %Pa3
  112. //
  113. // %Pb1 = add i64 %IDX, 4
  114. // %Pb2 = lshr i64 %Pb1, 1
  115. // %Pb3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pb2
  116. // %Vb = load <4 x float>, <4 x float>* %Pb3
  117. // ... }
  118. //
  119. // The goal is to prove that two loads load consecutive addresses.
  120. //
  121. // In this case the polynomials are constructed by the following
  122. // steps.
  123. //
  124. // The number tag #e specifies the error bits.
  125. //
  126. // Pa_0 = %IDX #0
  127. // Pa_1 = %IDX + 2 #0 | add 2
  128. // Pa_2 = %IDX/2 + 1 #1 | lshr 1
  129. // Pa_3 = %IDX/2 + 1 #1 | GEP, step signext to i64
  130. // Pa_4 = (%IDX/2)*16 + 16 #0 | GEP, multiply index by sizeof(4) for floats
  131. // Pa_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  132. //
  133. // Pb_0 = %IDX #0
  134. // Pb_1 = %IDX + 4 #0 | add 2
  135. // Pb_2 = %IDX/2 + 2 #1 | lshr 1
  136. // Pb_3 = %IDX/2 + 2 #1 | GEP, step signext to i64
  137. // Pb_4 = (%IDX/2)*16 + 32 #0 | GEP, multiply index by sizeof(4) for floats
  138. // Pb_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  139. //
  140. // Pb_5 - Pa_5 = 16 #0 | subtract to get the offset
  141. //
  142. // Remark: %PTR is not maintained within this class. So in this instance the
  143. // offset of 16 can only be assumed if the pointers are equal.
  144. //
class Polynomial {
  /// Operations on B
  enum BOps {
    LShr,
    Mul,
    SExt,
    Trunc,
  };

  /// Number of Error Bits e ((unsigned)-1 means the value is fully undefined)
  unsigned ErrorMSBs;

  /// Value (the variable part of the polynomial; nullptr if constant-only)
  Value *V;

  /// Coefficient B, expressed as the chain of operations applied to V
  SmallVector<std::pair<BOps, APInt>, 4> B;

  /// Coefficient A (the constant offset)
  APInt A;

public:
  /// Construct the polynomial 1*V + 0 for an integer-typed V. For any other
  /// type the polynomial stays fully undefined (ErrorMSBs == (unsigned)-1).
  Polynomial(Value *V) : ErrorMSBs((unsigned)-1), V(V), B(), A() {
    IntegerType *Ty = dyn_cast<IntegerType>(V->getType());
    if (Ty) {
      ErrorMSBs = 0;
      this->V = V;
      A = APInt(Ty->getBitWidth(), 0);
    }
  }

  /// Construct a constant polynomial (no coefficient B).
  Polynomial(const APInt &A, unsigned ErrorMSBs = 0)
      : ErrorMSBs(ErrorMSBs), V(NULL), B(), A(A) {}

  /// Construct a constant polynomial with the given bit width and value.
  Polynomial(unsigned BitWidth, uint64_t A, unsigned ErrorMSBs = 0)
      : ErrorMSBs(ErrorMSBs), V(NULL), B(), A(BitWidth, A) {}

  /// Construct a fully undefined polynomial.
  Polynomial() : ErrorMSBs((unsigned)-1), V(NULL), B(), A() {}

  /// Increment and clamp the number of undefined bits.
  void incErrorMSBs(unsigned amt) {
    if (ErrorMSBs == (unsigned)-1)
      return;

    ErrorMSBs += amt;
    if (ErrorMSBs > A.getBitWidth())
      ErrorMSBs = A.getBitWidth();
  }

  /// Decrement and clamp the number of undefined bits.
  void decErrorMSBs(unsigned amt) {
    if (ErrorMSBs == (unsigned)-1)
      return;

    if (ErrorMSBs > amt)
      ErrorMSBs -= amt;
    else
      ErrorMSBs = 0;
  }

  /// Apply an add on the polynomial
  Polynomial &add(const APInt &C) {
    // Note: Addition is associative in two's complement even in case of
    // signed overflow.
    //
    // Error bits can only propagate into higher significant bits. As these are
    // already regarded as undefined, there is no change.
    //
    // Theorem: Adding a constant to a polynomial does not change the error
    // term.
    //
    // Proof:
    //
    //   Since the addition is associative and commutes:
    //
    //   (B + A + E*2^(n-e)) + C = B + (A + C) + E*2^(n-e)
    // [qed]

    // Mismatched widths cannot be reasoned about; mark fully undefined.
    if (C.getBitWidth() != A.getBitWidth()) {
      ErrorMSBs = (unsigned)-1;
      return *this;
    }

    A += C;
    return *this;
  }

  /// Apply a multiplication onto the polynomial.
  Polynomial &mul(const APInt &C) {
    // Note: Multiplication distributes over the addition
    //
    // Theorem: Multiplication distributes over the addition
    //
    // Proof(1):
    //
    //   (B+A)*C =
    //         = (B + A) + (B + A) + .. {C Times}
    //         addition is associative and commutes, hence
    //         = B + B + .. {C Times} .. + A + A + .. {C times}
    //         = B*C + A*C
    //   (see (function add) for signed values and overflows)
    // [qed]
    //
    // Theorem: If C has c trailing zeros, error bits in A or B are shifted out
    //          to the left.
    //
    // Proof(2):
    //
    //   Let B' and A' be the n-Bit inputs with some unknown errors EA,
    //   EB at e leading bits. B' and A' can be written down as:
    //
    //     B' = B + 2^(n-e)*EB
    //     A' = A + 2^(n-e)*EA
    //
    //   Let C' be an input with c trailing zero bits. C' can be written as
    //
    //     C' = C*2^c
    //
    //   Therefore we can compute the result by using distributivity and
    //   commutativity.
    //
    //     (B'*C' + A'*C') = [B + 2^(n-e)*EB] * C' + [A + 2^(n-e)*EA] * C' =
    //                     = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
    //                     = (B'+A') * C' =
    //                     = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
    //                     = [B + A + 2^(n-e)*EB + 2^(n-e)*EA] * C' =
    //                     = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C' =
    //                     = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C*2^c =
    //                     = (B + A) * C' + C*(EB + EA)*2^(n-e)*2^c =
    //
    //   Let EC be the final error with EC = C*(EB + EA)
    //
    //                     = (B + A)*C' + EC*2^(n-e)*2^c =
    //                     = (B + A)*C' + EC*2^(n-(e-c))
    //
    //   Since EC is multiplied by 2^(n-(e-c)) the resulting error contains c
    //   less error bits than the input. c bits are shifted out to the left.
    // [qed]

    // Mismatched widths cannot be reasoned about; mark fully undefined.
    if (C.getBitWidth() != A.getBitWidth()) {
      ErrorMSBs = (unsigned)-1;
      return *this;
    }

    // Multiplying by one is a no-op.
    if (C.isOneValue()) {
      return *this;
    }

    // Multiplying by zero removes the coefficient B and defines all bits.
    if (C.isNullValue()) {
      ErrorMSBs = 0;
      deleteB();
    }

    // See Proof(2): Trailing zero bits indicate a left shift. This removes
    // leading bits from the result even if they are undefined.
    decErrorMSBs(C.countTrailingZeros());

    A *= C;
    pushBOperation(Mul, C);
    return *this;
  }

  /// Apply a logical shift right on the polynomial
  Polynomial &lshr(const APInt &C) {
    // Theorem(1): (B + A + E*2^(n-e)) >> 1 => (B >> 1) + (A >> 1) + E'*2^(n-e')
    //          where
    //             e' = e + 1,
    //             E is a e-bit number,
    //             E' is a e'-bit number,
    //          holds under the following precondition:
    //             pre(1): A % 2 = 0
    //             pre(2): e < n, (see Theorem(2) for the trivial case with e=n)
    //          where >> expresses a logical shift to the right, with adding
    //          zeros.
    //
    //  We need to show that for every, E there is a E'
    //
    //  B = b_h * 2^(n-1) + b_m * 2 + b_l
    //  A = a_h * 2^(n-1) + a_m * 2         (pre(1))
    //
    //  where a_h, b_h, b_l are single bits, and a_m, b_m are (n-2) bit numbers
    //
    //  Let X = (B + A + E*2^(n-e)) >> 1
    //  Let Y = (B >> 1) + (A >> 1) + E*2^(n-e) >> 1
    //
    //    X = [B + A + E*2^(n-e)] >> 1 =
    //      = [  b_h * 2^(n-1) + b_m * 2 + b_l +
    //         + a_h * 2^(n-1) + a_m * 2 +
    //         + E * 2^(n-e) ] >> 1 =
    //
    //    The sum is built by putting the overflow of [a_m + b_m] into the term
    //    2^(n-1). As there are no more bits beyond 2^(n-1) the overflow within
    //    this bit is discarded. This is expressed by % 2.
    //
    //    The bit in position 0 cannot overflow into the term (b_m + a_m).
    //
    //      = [  ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-1) +
    //         + ((b_m + a_m) % 2^(n-2)) * 2 +
    //         + b_l + E * 2^(n-e) ] >> 1 =
    //
    //    The shift is computed by dividing the terms by 2 and by cutting off
    //    b_l.
    //
    //      =    ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + E * 2^(n-(e+1)) =
    //
    //    by the definition in the Theorem e+1 = e'
    //
    //      =    ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + E * 2^(n-e') =
    //
    //    Compute Y by applying distributivity first
    //
    //    Y =  (B >> 1) + (A >> 1) + E*2^(n-e') =
    //      =    (b_h * 2^(n-1) + b_m * 2 + b_l) >> 1 +
    //         + (a_h * 2^(n-1) + a_m * 2) >> 1 +
    //         + E * 2^(n-e) >> 1 =
    //
    //    Again, the shift is computed by dividing the terms by 2 and by cutting
    //    off b_l.
    //
    //      =     b_h * 2^(n-2) + b_m +
    //         +  a_h * 2^(n-2) + a_m +
    //         +  E * 2^(n-(e+1)) =
    //
    //    Again, the sum is built by putting the overflow of [a_m + b_m] into
    //    the term 2^(n-1). But this time there is room for a second bit in the
    //    term 2^(n-2) we add this bit to a new term and denote it o_h in a
    //    second step.
    //
    //      =    ([b_h + a_h + (b_m + a_m) >> (n-2)] >> 1) * 2^(n-1) +
    //         + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + E * 2^(n-(e+1)) =
    //
    //    Let o_h = [b_h + a_h + (b_m + a_m) >> (n-2)] >> 1
    //    Further replace e+1 by e'.
    //
    //      =    o_h * 2^(n-1) +
    //         + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + E * 2^(n-e') =
    //
    //    Move o_h into the error term and construct E'. To ensure that there is
    //    no 2^x with negative x, this step requires pre(2) (e < n).
    //
    //      =    ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + o_h * 2^(e'-1) * 2^(n-e') +   | pre(2), move 2^(e'-1)
    //                                         | out of the old exponent
    //         + E * 2^(n-e') =
    //      =    ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + [o_h * 2^(e'-1) + E] * 2^(n-e') +   | move 2^(e'-1) out of
    //                                               | the old exponent
    //
    //    Let E' = o_h * 2^(e'-1) + E
    //
    //      =    ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
    //         + ((b_m + a_m) % 2^(n-2)) +
    //         + E' * 2^(n-e')
    //
    //    Because X and Y are distinct only in their error terms and E' can be
    //    constructed as shown the theorem holds.
    // [qed]
    //
    // For completeness in the case e=n it is also required to show that
    // distributivity can be applied.
    //
    // In this case Theorem(1) transforms to (the pre-condition on A can also be
    // dropped)
    //
    // Theorem(2): (B + A + E) >> 1 => (B >> 1) + (A >> 1) + E'
    //          where
    //             A, B, E, E' are two's complement numbers with the same bit
    //             width
    //
    //   Let A + B + E = X
    //   Let (B >> 1) + (A >> 1) = Y
    //
    //   Therefore we need to show that for every X and Y there is an E' which
    //   makes the equation
    //
    //   X = Y + E'
    //
    //   hold. This is trivially the case for E' = X - Y.
    //
    // [qed]
    //
    // Remark: Distributing lshr with an arbitrary number n can be expressed as
    //   ((((B + A) lshr 1) lshr 1) ... ) {n times}.
    //   This construction induces n additional error bits at the left.

    // Mismatched widths cannot be reasoned about; mark fully undefined.
    if (C.getBitWidth() != A.getBitWidth()) {
      ErrorMSBs = (unsigned)-1;
      return *this;
    }

    if (C.isNullValue())
      return *this;

    // Test if the result will be zero
    unsigned shiftAmt = C.getZExtValue();
    if (shiftAmt >= C.getBitWidth())
      return mul(APInt(C.getBitWidth(), 0));

    // The proof that shiftAmt LSBs are zero for at least one summand is only
    // possible for the constant number.
    //
    // If this can be proven add shiftAmt to the error counter
    // `ErrorMSBs`. Otherwise set all bits as undefined.
    if (A.countTrailingZeros() < shiftAmt)
      ErrorMSBs = A.getBitWidth();
    else
      incErrorMSBs(shiftAmt);

    // Apply the operation.
    pushBOperation(LShr, C);
    A = A.lshr(shiftAmt);

    return *this;
  }

  /// Apply a sign-extend or truncate operation on the polynomial.
  Polynomial &sextOrTrunc(unsigned n) {
    if (n < A.getBitWidth()) {
      // Truncate: Clearly undefined Bits on the MSB side are removed
      // if there are any.
      decErrorMSBs(A.getBitWidth() - n);
      A = A.trunc(n);
      pushBOperation(Trunc, APInt(sizeof(n) * 8, n));
    }
    if (n > A.getBitWidth()) {
      // Extend: Clearly extending first and adding later is different
      // to adding first and extending later in all extended bits.
      incErrorMSBs(n - A.getBitWidth());
      A = A.sext(n);
      pushBOperation(SExt, APInt(sizeof(n) * 8, n));
    }

    return *this;
  }

  /// Test if there is a coefficient B.
  bool isFirstOrder() const { return V != nullptr; }

  /// Test if the coefficients B of two Polynomials are equal.
  bool isCompatibleTo(const Polynomial &o) const {
    // The polynomials use different bit widths.
    if (A.getBitWidth() != o.A.getBitWidth())
      return false;

    // If neither Polynomial has the Coefficient B.
    if (!isFirstOrder() && !o.isFirstOrder())
      return true;

    // The index variable is different.
    if (V != o.V)
      return false;

    // Check the operations.
    if (B.size() != o.B.size())
      return false;

    auto ob = o.B.begin();
    for (auto &b : B) {
      if (b != *ob)
        return false;

      ob++;
    }

    return true;
  }

  /// Subtract two polynomials, return an undefined polynomial if
  /// subtraction is not possible.
  Polynomial operator-(const Polynomial &o) const {
    // Return an undefined polynomial if incompatible.
    if (!isCompatibleTo(o))
      return Polynomial();

    // If the polynomials are compatible (meaning they have the same
    // coefficient on B), B is eliminated. Thus a polynomial solely
    // containing A is returned
    return Polynomial(A - o.A, std::max(ErrorMSBs, o.ErrorMSBs));
  }

  /// Subtract a constant from a polynomial,
  Polynomial operator-(uint64_t C) const {
    Polynomial Result(*this);
    Result.A -= C;
    return Result;
  }

  /// Add a constant to a polynomial,
  Polynomial operator+(uint64_t C) const {
    Polynomial Result(*this);
    Result.A += C;
    return Result;
  }

  /// Returns true if it can be proven that two Polynomials are equal.
  bool isProvenEqualTo(const Polynomial &o) {
    // Subtract both polynomials and test if it is fully defined and zero.
    Polynomial r = *this - o;
    return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isNullValue());
  }

  /// Print the polynomial into a stream.
  void print(raw_ostream &OS) const {
    OS << "[{#ErrBits:" << ErrorMSBs << "} ";

    if (V) {
      // One opening parenthesis per recorded operation on B.
      for (auto b : B)
        OS << "(";
      OS << "(" << *V << ") ";

      for (auto b : B) {
        switch (b.first) {
        case LShr:
          OS << "LShr ";
          break;
        case Mul:
          OS << "Mul ";
          break;
        case SExt:
          OS << "SExt ";
          break;
        case Trunc:
          OS << "Trunc ";
          break;
        }

        OS << b.second << ") ";
      }
    }

    OS << "+ " << A << "]";
  }

private:
  /// Drop the variable part of the polynomial, leaving only the constant A.
  void deleteB() {
    V = nullptr;
    B.clear();
  }

  /// Record an operation applied to coefficient B. No-op for constant-only
  /// polynomials (no B to operate on).
  void pushBOperation(const BOps Op, const APInt &C) {
    if (isFirstOrder()) {
      B.push_back(std::make_pair(Op, C));
      return;
    }
  }
};
  552. #ifndef NDEBUG
  553. static raw_ostream &operator<<(raw_ostream &OS, const Polynomial &S) {
  554. S.print(OS);
  555. return OS;
  556. }
  557. #endif
/// VectorInfo stores the following abstract information for each vector
/// element:
///
/// 1) The memory address loaded into the element as a Polynomial
/// 2) a set of load instructions necessary to construct the vector,
/// 3) a set of all other instructions that are necessary to create the vector and
/// 4) a pointer value that can be used as relative base for all elements.
  565. struct VectorInfo {
  566. private:
  // Copying is rejected at runtime (it would also have to duplicate the
  // owned EI array); kept private so accidental copies fail to compile.
  VectorInfo(const VectorInfo &c) : VTy(c.VTy) {
    llvm_unreachable(
        "Copying VectorInfo is neither implemented nor necessary,");
  }
  571. public:
  /// Information of a Vector Element
  struct ElementInfo {
    /// Offset Polynomial.
    Polynomial Ofs;

    /// The Load Instruction used to Load the entry. LI is null if the pointer
    /// of the load instruction does not point on to the entry
    LoadInst *LI;

    /// Default arguments yield an "unknown" element: undefined offset, no load.
    ElementInfo(Polynomial Offset = Polynomial(), LoadInst *LI = nullptr)
        : Ofs(Offset), LI(LI) {}
  };
  /// Basic-block the load instructions are within
  BasicBlock *BB;

  /// Pointer value of all participating load instructions
  Value *PV;

  /// Participating load instructions
  std::set<LoadInst *> LIs;

  /// Participating instructions (loads, shuffles and bitcasts feeding the
  /// final shuffle)
  std::set<Instruction *> Is;

  /// Final shuffle-vector instruction
  ShuffleVectorInst *SVI;

  /// Information of the offset for each vector element (owned array of
  /// getDimension() entries, allocated in the constructor)
  ElementInfo *EI;

  /// Vector Type
  VectorType *const VTy;
  /// Create an empty VectorInfo for a vector of type \p VTy; allocates one
  /// ElementInfo slot per vector element (released in the destructor).
  VectorInfo(VectorType *VTy)
      : BB(nullptr), PV(nullptr), LIs(), Is(), SVI(nullptr), VTy(VTy) {
    EI = new ElementInfo[VTy->getNumElements()];
  }

  virtual ~VectorInfo() { delete[] EI; }

  /// Number of elements of the analyzed vector type.
  unsigned getDimension() const { return VTy->getNumElements(); }
  602. /// Test if the VectorInfo can be part of an interleaved load with the
  603. /// specified factor.
  604. ///
  605. /// \param Factor of the interleave
  606. /// \param DL Targets Datalayout
  607. ///
  608. /// \returns true if this is possible and false if not
  609. bool isInterleaved(unsigned Factor, const DataLayout &DL) const {
  610. unsigned Size = DL.getTypeAllocSize(VTy->getElementType());
  611. for (unsigned i = 1; i < getDimension(); i++) {
  612. if (!EI[i].Ofs.isProvenEqualTo(EI[0].Ofs + i * Factor * Size)) {
  613. return false;
  614. }
  615. }
  616. return true;
  617. }
  618. /// Recursively computes the vector information stored in V.
  619. ///
  620. /// This function delegates the work to specialized implementations
  621. ///
  622. /// \param V Value to operate on
  623. /// \param Result Result of the computation
  624. ///
  625. /// \returns false if no sensible information can be gathered.
  626. static bool compute(Value *V, VectorInfo &Result, const DataLayout &DL) {
  627. ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  628. if (SVI)
  629. return computeFromSVI(SVI, Result, DL);
  630. LoadInst *LI = dyn_cast<LoadInst>(V);
  631. if (LI)
  632. return computeFromLI(LI, Result, DL);
  633. BitCastInst *BCI = dyn_cast<BitCastInst>(V);
  634. if (BCI)
  635. return computeFromBCI(BCI, Result, DL);
  636. return false;
  637. }
  /// BitCastInst specialization to compute the vector information.
  ///
  /// \param BCI BitCastInst to operate on
  /// \param Result Result of the computation
  ///
  /// \returns false if no sensible information can be gathered.
  static bool computeFromBCI(BitCastInst *BCI, VectorInfo &Result,
                             const DataLayout &DL) {
    Instruction *Op = dyn_cast<Instruction>(BCI->getOperand(0));

    if (!Op)
      return false;

    VectorType *VTy = dyn_cast<VectorType>(Op->getType());
    if (!VTy)
      return false;

    // We can only cast from large to smaller vectors
    if (Result.VTy->getNumElements() % VTy->getNumElements())
      return false;

    // Factor = how many result elements each source element splits into.
    unsigned Factor = Result.VTy->getNumElements() / VTy->getNumElements();
    unsigned NewSize = DL.getTypeAllocSize(Result.VTy->getElementType());
    unsigned OldSize = DL.getTypeAllocSize(VTy->getElementType());

    // The cast must preserve the total byte size of each element group.
    if (NewSize * Factor != OldSize)
      return false;

    // Recursively analyze the cast's operand at the source vector type.
    VectorInfo Old(VTy);
    if (!compute(Op, Old, DL))
      return false;

    // Split each old element into Factor new elements at increasing byte
    // offsets; only the first sub-element keeps the originating load.
    for (unsigned i = 0; i < Result.VTy->getNumElements(); i += Factor) {
      for (unsigned j = 0; j < Factor; j++) {
        Result.EI[i + j] =
            ElementInfo(Old.EI[i / Factor].Ofs + j * NewSize,
                        j == 0 ? Old.EI[i / Factor].LI : nullptr);
      }
    }

    // Propagate block/pointer info and record the bitcast as participating.
    Result.BB = Old.BB;
    Result.PV = Old.PV;
    Result.LIs.insert(Old.LIs.begin(), Old.LIs.end());
    Result.Is.insert(Old.Is.begin(), Old.Is.end());
    Result.Is.insert(BCI);
    Result.SVI = nullptr;

    return true;
  }
  /// ShuffleVectorInst specialization to compute vector information.
  ///
  /// \param SVI ShuffleVectorInst to operate on
  /// \param Result Result of the computation
  ///
  /// Compute the left and the right side vector information and merge them by
  /// applying the shuffle operation. This function also ensures that the left
  /// and right side have compatible loads. This means that all loads are
  /// within the same basic block and are based on the same pointer.
  ///
  /// \returns false if no sensible information can be gathered.
  static bool computeFromSVI(ShuffleVectorInst *SVI, VectorInfo &Result,
                             const DataLayout &DL) {
    VectorType *ArgTy = dyn_cast<VectorType>(SVI->getOperand(0)->getType());
    assert(ArgTy && "ShuffleVector Operand is not a VectorType");

    // Compute the left hand vector information.
    // A failed sub-computation is flagged by clearing BB (also checked below).
    VectorInfo LHS(ArgTy);
    if (!compute(SVI->getOperand(0), LHS, DL))
      LHS.BB = nullptr;

    // Compute the right hand vector information.
    VectorInfo RHS(ArgTy);
    if (!compute(SVI->getOperand(1), RHS, DL))
      RHS.BB = nullptr;

    // Neither operand produced sensible results?
    if (!LHS.BB && !RHS.BB)
      return false;
    // Only RHS produced sensible results?
    else if (!LHS.BB) {
      Result.BB = RHS.BB;
      Result.PV = RHS.PV;
    }
    // Only LHS produced sensible results?
    else if (!RHS.BB) {
      Result.BB = LHS.BB;
      Result.PV = LHS.PV;
    }
    // Both operands produced sensible results?
    else if ((LHS.BB == RHS.BB) && (LHS.PV == RHS.PV)) {
      Result.BB = LHS.BB;
      Result.PV = LHS.PV;
    }
    // Both operands produced sensible results but they are incompatible.
    else {
      return false;
    }

    // Merge and apply the operation on the offset information.
    if (LHS.BB) {
      Result.LIs.insert(LHS.LIs.begin(), LHS.LIs.end());
      Result.Is.insert(LHS.Is.begin(), LHS.Is.end());
    }
    if (RHS.BB) {
      Result.LIs.insert(RHS.LIs.begin(), RHS.LIs.end());
      Result.Is.insert(RHS.Is.begin(), RHS.Is.end());
    }
    Result.Is.insert(SVI);
    Result.SVI = SVI;

    // Map each mask index onto the corresponding operand element; undef
    // lanes (i < 0) and lanes from a failed operand become unknown elements.
    int j = 0;
    for (int i : SVI->getShuffleMask()) {
      assert((i < 2 * (signed)ArgTy->getNumElements()) &&
             "Invalid ShuffleVectorInst (index out of bounds)");

      if (i < 0)
        Result.EI[j] = ElementInfo();
      else if (i < (signed)ArgTy->getNumElements()) {
        if (LHS.BB)
          Result.EI[j] = LHS.EI[i];
        else
          Result.EI[j] = ElementInfo();
      } else {
        if (RHS.BB)
          Result.EI[j] = RHS.EI[i - ArgTy->getNumElements()];
        else
          Result.EI[j] = ElementInfo();
      }
      j++;
    }

    return true;
  }
  755. /// LoadInst specialization to compute vector information.
  756. ///
  757. /// This function also acts as abort condition to the recursion.
  758. ///
  759. /// \param LI LoadInst to operate on
  760. /// \param Result Result of the computation
  761. ///
  762. /// \returns false if no sensible information can be gathered.
  763. static bool computeFromLI(LoadInst *LI, VectorInfo &Result,
  764. const DataLayout &DL) {
  765. Value *BasePtr;
  766. Polynomial Offset;
  767. if (LI->isVolatile())
  768. return false;
  769. if (LI->isAtomic())
  770. return false;
  771. // Get the base polynomial
  772. computePolynomialFromPointer(*LI->getPointerOperand(), Offset, BasePtr, DL);
  773. Result.BB = LI->getParent();
  774. Result.PV = BasePtr;
  775. Result.LIs.insert(LI);
  776. Result.Is.insert(LI);
  777. for (unsigned i = 0; i < Result.getDimension(); i++) {
  778. Value *Idx[2] = {
  779. ConstantInt::get(Type::getInt32Ty(LI->getContext()), 0),
  780. ConstantInt::get(Type::getInt32Ty(LI->getContext()), i),
  781. };
  782. int64_t Ofs = DL.getIndexedOffsetInType(Result.VTy, makeArrayRef(Idx, 2));
  783. Result.EI[i] = ElementInfo(Offset + Ofs, i == 0 ? LI : nullptr);
  784. }
  785. return true;
  786. }
  787. /// Recursively compute polynomial of a value.
  788. ///
  789. /// \param BO Input binary operation
  790. /// \param Result Result polynomial
  791. static void computePolynomialBinOp(BinaryOperator &BO, Polynomial &Result) {
  792. Value *LHS = BO.getOperand(0);
  793. Value *RHS = BO.getOperand(1);
  794. // Find the RHS Constant if any
  795. ConstantInt *C = dyn_cast<ConstantInt>(RHS);
  796. if ((!C) && BO.isCommutative()) {
  797. C = dyn_cast<ConstantInt>(LHS);
  798. if (C)
  799. std::swap(LHS, RHS);
  800. }
  801. switch (BO.getOpcode()) {
  802. case Instruction::Add:
  803. if (!C)
  804. break;
  805. computePolynomial(*LHS, Result);
  806. Result.add(C->getValue());
  807. return;
  808. case Instruction::LShr:
  809. if (!C)
  810. break;
  811. computePolynomial(*LHS, Result);
  812. Result.lshr(C->getValue());
  813. return;
  814. default:
  815. break;
  816. }
  817. Result = Polynomial(&BO);
  818. }
  819. /// Recursively compute polynomial of a value
  820. ///
  821. /// \param V input value
  822. /// \param Result result polynomial
  823. static void computePolynomial(Value &V, Polynomial &Result) {
  824. if (auto *BO = dyn_cast<BinaryOperator>(&V))
  825. computePolynomialBinOp(*BO, Result);
  826. else
  827. Result = Polynomial(&V);
  828. }
  829. /// Compute the Polynomial representation of a Pointer type.
  830. ///
  831. /// \param Ptr input pointer value
  832. /// \param Result result polynomial
  833. /// \param BasePtr pointer the polynomial is based on
  834. /// \param DL Datalayout of the target machine
  835. static void computePolynomialFromPointer(Value &Ptr, Polynomial &Result,
  836. Value *&BasePtr,
  837. const DataLayout &DL) {
  838. // Not a pointer type? Return an undefined polynomial
  839. PointerType *PtrTy = dyn_cast<PointerType>(Ptr.getType());
  840. if (!PtrTy) {
  841. Result = Polynomial();
  842. BasePtr = nullptr;
  843. return;
  844. }
  845. unsigned PointerBits =
  846. DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace());
  847. /// Skip pointer casts. Return Zero polynomial otherwise
  848. if (isa<CastInst>(&Ptr)) {
  849. CastInst &CI = *cast<CastInst>(&Ptr);
  850. switch (CI.getOpcode()) {
  851. case Instruction::BitCast:
  852. computePolynomialFromPointer(*CI.getOperand(0), Result, BasePtr, DL);
  853. break;
  854. default:
  855. BasePtr = &Ptr;
  856. Polynomial(PointerBits, 0);
  857. break;
  858. }
  859. }
  860. /// Resolve GetElementPtrInst.
  861. else if (isa<GetElementPtrInst>(&Ptr)) {
  862. GetElementPtrInst &GEP = *cast<GetElementPtrInst>(&Ptr);
  863. APInt BaseOffset(PointerBits, 0);
  864. // Check if we can compute the Offset with accumulateConstantOffset
  865. if (GEP.accumulateConstantOffset(DL, BaseOffset)) {
  866. Result = Polynomial(BaseOffset);
  867. BasePtr = GEP.getPointerOperand();
  868. return;
  869. } else {
  870. // Otherwise we allow that the last index operand of the GEP is
  871. // non-constant.
  872. unsigned idxOperand, e;
  873. SmallVector<Value *, 4> Indices;
  874. for (idxOperand = 1, e = GEP.getNumOperands(); idxOperand < e;
  875. idxOperand++) {
  876. ConstantInt *IDX = dyn_cast<ConstantInt>(GEP.getOperand(idxOperand));
  877. if (!IDX)
  878. break;
  879. Indices.push_back(IDX);
  880. }
  881. // It must also be the last operand.
  882. if (idxOperand + 1 != e) {
  883. Result = Polynomial();
  884. BasePtr = nullptr;
  885. return;
  886. }
  887. // Compute the polynomial of the index operand.
  888. computePolynomial(*GEP.getOperand(idxOperand), Result);
  889. // Compute base offset from zero based index, excluding the last
  890. // variable operand.
  891. BaseOffset =
  892. DL.getIndexedOffsetInType(GEP.getSourceElementType(), Indices);
  893. // Apply the operations of GEP to the polynomial.
  894. unsigned ResultSize = DL.getTypeAllocSize(GEP.getResultElementType());
  895. Result.sextOrTrunc(PointerBits);
  896. Result.mul(APInt(PointerBits, ResultSize));
  897. Result.add(BaseOffset);
  898. BasePtr = GEP.getPointerOperand();
  899. }
  900. }
  901. // All other instructions are handled by using the value as base pointer and
  902. // a zero polynomial.
  903. else {
  904. BasePtr = &Ptr;
  905. Polynomial(DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace()), 0);
  906. }
  907. }
#ifndef NDEBUG
  /// Dump this vector description as "<base pointer> + [ofs0, ofs1, ...]".
  /// Debug-build only helper; \p OS is the stream written to.
  void print(raw_ostream &OS) const {
    if (PV)
      OS << *PV;
    else
      OS << "(none)";
    OS << " + ";
    // The first iteration opens the bracket; later ones emit separators.
    for (unsigned i = 0; i < getDimension(); i++)
      OS << ((i == 0) ? "[" : ", ") << EI[i].Ofs;
    OS << "]";
  }
#endif
  920. };
  921. } // anonymous namespace
/// Search \p Candidates for \p Factor vectors that together form one
/// interleaved load: same vector type, same basic block, same base pointer,
/// and first-element offsets spaced exactly one element size apart. On
/// success the matching VectorInfos are spliced out of \p Candidates into
/// \p InterleavedLoad and true is returned.
bool InterleavedLoadCombineImpl::findPattern(
    std::list<VectorInfo> &Candidates, std::list<VectorInfo> &InterleavedLoad,
    unsigned Factor, const DataLayout &DL) {
  for (auto C0 = Candidates.begin(), E0 = Candidates.end(); C0 != E0; ++C0) {
    unsigned i;
    // Try to find an interleaved load using the front of Worklist as first line
    unsigned Size = DL.getTypeAllocSize(C0->VTy->getElementType());

    // List containing iterators pointing to the VectorInfos of the candidates
    std::vector<std::list<VectorInfo>::iterator> Res(Factor, Candidates.end());

    for (auto C = Candidates.begin(), E = Candidates.end(); C != E; C++) {
      if (C->VTy != C0->VTy)
        continue;
      if (C->BB != C0->BB)
        continue;
      if (C->PV != C0->PV)
        continue;

      // Check the current value matches any of factor - 1 remaining lines
      // NOTE: Res[] slots are filled incrementally across iterations of the
      // inner loop; each line of the pattern may come from a different C.
      for (i = 1; i < Factor; i++) {
        if (C->EI[0].Ofs.isProvenEqualTo(C0->EI[0].Ofs + i * Size)) {
          Res[i] = C;
        }
      }

      // Are all of lines 1..Factor-1 found?
      for (i = 1; i < Factor; i++) {
        if (Res[i] == Candidates.end())
          break;
      }

      // If so, C0 itself supplies line 0 and the pattern is complete.
      if (i == Factor) {
        Res[0] = C0;
        break;
      }
    }

    if (Res[0] != Candidates.end()) {
      // Move the result into the output
      for (unsigned i = 0; i < Factor; i++) {
        InterleavedLoad.splice(InterleavedLoad.end(), Candidates, Res[i]);
      }

      return true;
    }
  }
  return false;
}
  963. LoadInst *
  964. InterleavedLoadCombineImpl::findFirstLoad(const std::set<LoadInst *> &LIs) {
  965. assert(!LIs.empty() && "No load instructions given.");
  966. // All LIs are within the same BB. Select the first for a reference.
  967. BasicBlock *BB = (*LIs.begin())->getParent();
  968. BasicBlock::iterator FLI =
  969. std::find_if(BB->begin(), BB->end(), [&LIs](Instruction &I) -> bool {
  970. return is_contained(LIs, &I);
  971. });
  972. assert(FLI != BB->end());
  973. return cast<LoadInst>(FLI);
  974. }
/// Try to replace the final ShuffleVectorInsts described by \p InterleavedLoad
/// with a single wide load plus strided shufflevectors (the pattern that
/// InterleavedAccessPass detects). Performs legality checks (MemorySSA
/// dominance of defining accesses, dominator-tree checks) and a TTI cost
/// comparison before rewriting. \returns true if the IR was changed.
bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
                                         OptimizationRemarkEmitter &ORE) {
  LLVM_DEBUG(dbgs() << "Checking interleaved load\n");

  // The insertion point is the LoadInst which loads the first values. The
  // following tests are used to proof that the combined load can be inserted
  // just before InsertionPoint.
  LoadInst *InsertionPoint = InterleavedLoad.front().EI[0].LI;

  // Test if the offset is computed
  if (!InsertionPoint)
    return false;

  std::set<LoadInst *> LIs;
  std::set<Instruction *> Is;
  std::set<Instruction *> SVIs;

  unsigned InterleavedCost;
  unsigned InstructionCost = 0;

  // Get the interleave factor
  unsigned Factor = InterleavedLoad.size();

  // Merge all input sets used in analysis
  for (auto &VI : InterleavedLoad) {
    // Generate a set of all load instructions to be combined
    LIs.insert(VI.LIs.begin(), VI.LIs.end());

    // Generate a set of all instructions taking part in load
    // interleaved. This list excludes the instructions necessary for the
    // polynomial construction.
    Is.insert(VI.Is.begin(), VI.Is.end());

    // Generate the set of the final ShuffleVectorInst.
    SVIs.insert(VI.SVI);
  }

  // There is nothing to combine.
  if (LIs.size() < 2)
    return false;

  // Test if all participating instruction will be dead after the
  // transformation. If intermediate results are used, no performance gain can
  // be expected. Also sum the cost of the Instructions beeing left dead.
  for (auto &I : Is) {
    // Compute the old cost
    InstructionCost +=
        TTI.getInstructionCost(I, TargetTransformInfo::TCK_Latency);

    // The final SVIs are allowed not to be dead, all uses will be replaced
    if (SVIs.find(I) != SVIs.end())
      continue;

    // If there are users outside the set to be eliminated, we abort the
    // transformation. No gain can be expected.
    for (const auto &U : I->users()) {
      if (Is.find(dyn_cast<Instruction>(U)) == Is.end())
        return false;
    }
  }

  // We know that all LoadInst are within the same BB. This guarantees that
  // either everything or nothing is loaded.
  LoadInst *First = findFirstLoad(LIs);

  // To be safe that the loads can be combined, iterate over all loads and test
  // that the corresponding defining access dominates first LI. This guarantees
  // that there are no aliasing stores in between the loads.
  auto FMA = MSSA.getMemoryAccess(First);
  for (auto LI : LIs) {
    auto MADef = MSSA.getMemoryAccess(LI)->getDefiningAccess();
    if (!MSSA.dominates(MADef, FMA))
      return false;
  }
  assert(!LIs.empty() && "There are no LoadInst to combine");

  // It is necessary that insertion point dominates all final ShuffleVectorInst.
  for (auto &VI : InterleavedLoad) {
    if (!DT.dominates(InsertionPoint, VI.SVI))
      return false;
  }

  // All checks are done. Add instructions detectable by InterleavedAccessPass
  // The old instruction will are left dead.
  IRBuilder<> Builder(InsertionPoint);
  Type *ETy = InterleavedLoad.front().SVI->getType()->getElementType();
  unsigned ElementsPerSVI =
      InterleavedLoad.front().SVI->getType()->getNumElements();
  VectorType *ILTy = VectorType::get(ETy, Factor * ElementsPerSVI);

  // Only rewrite if the target reports the wide interleaved load as cheaper
  // than the instructions it replaces.
  SmallVector<unsigned, 4> Indices;
  for (unsigned i = 0; i < Factor; i++)
    Indices.push_back(i);
  InterleavedCost = TTI.getInterleavedMemoryOpCost(
      Instruction::Load, ILTy, Factor, Indices, InsertionPoint->getAlignment(),
      InsertionPoint->getPointerAddressSpace());

  if (InterleavedCost >= InstructionCost) {
    return false;
  }

  // Create a pointer cast for the wide load.
  auto CI = Builder.CreatePointerCast(InsertionPoint->getOperand(0),
                                      ILTy->getPointerTo(),
                                      "interleaved.wide.ptrcast");

  // Create the wide load and update the MemorySSA.
  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
                                      "interleaved.wide.load");
  auto MSSAU = MemorySSAUpdater(&MSSA);
  MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
      LI, nullptr, MSSA.getMemoryAccess(InsertionPoint)));
  MSSAU.insertUse(MSSALoad);

  // Create the final SVIs and replace all uses.
  // Each mask selects lanes i, i+Factor, i+2*Factor, ... of the wide load.
  int i = 0;
  for (auto &VI : InterleavedLoad) {
    SmallVector<uint32_t, 4> Mask;
    for (unsigned j = 0; j < ElementsPerSVI; j++)
      Mask.push_back(i + j * Factor);

    Builder.SetInsertPoint(VI.SVI);
    auto SVI = Builder.CreateShuffleVector(LI, UndefValue::get(LI->getType()),
                                           Mask, "interleaved.shuffle");
    VI.SVI->replaceAllUsesWith(SVI);
    i++;
  }

  NumInterleavedLoadCombine++;
  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Combined Interleaved Load", LI)
           << "Load interleaved combined with factor "
           << ore::NV("Factor", Factor);
  });

  return true;
}
  1088. bool InterleavedLoadCombineImpl::run() {
  1089. OptimizationRemarkEmitter ORE(&F);
  1090. bool changed = false;
  1091. unsigned MaxFactor = TLI.getMaxSupportedInterleaveFactor();
  1092. auto &DL = F.getParent()->getDataLayout();
  1093. // Start with the highest factor to avoid combining and recombining.
  1094. for (unsigned Factor = MaxFactor; Factor >= 2; Factor--) {
  1095. std::list<VectorInfo> Candidates;
  1096. for (BasicBlock &BB : F) {
  1097. for (Instruction &I : BB) {
  1098. if (auto SVI = dyn_cast<ShuffleVectorInst>(&I)) {
  1099. Candidates.emplace_back(SVI->getType());
  1100. if (!VectorInfo::computeFromSVI(SVI, Candidates.back(), DL)) {
  1101. Candidates.pop_back();
  1102. continue;
  1103. }
  1104. if (!Candidates.back().isInterleaved(Factor, DL)) {
  1105. Candidates.pop_back();
  1106. }
  1107. }
  1108. }
  1109. }
  1110. std::list<VectorInfo> InterleavedLoad;
  1111. while (findPattern(Candidates, InterleavedLoad, Factor, DL)) {
  1112. if (combine(InterleavedLoad, ORE)) {
  1113. changed = true;
  1114. } else {
  1115. // Remove the first element of the Interleaved Load but put the others
  1116. // back on the list and continue searching
  1117. Candidates.splice(Candidates.begin(), InterleavedLoad,
  1118. std::next(InterleavedLoad.begin()),
  1119. InterleavedLoad.end());
  1120. }
  1121. InterleavedLoad.clear();
  1122. }
  1123. }
  1124. return changed;
  1125. }
namespace {
/// This pass combines interleaved loads into a pattern detectable by
/// InterleavedAccessPass.
struct InterleavedLoadCombine : public FunctionPass {
  // Pass identification, replacement for typeid.
  static char ID;

  InterleavedLoadCombine() : FunctionPass(ID) {
    initializeInterleavedLoadCombinePass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "Interleaved Load Combine Pass";
  }

  /// Run the combining on \p F; delegates the real work to
  /// InterleavedLoadCombineImpl. \returns true if the function was changed.
  bool runOnFunction(Function &F) override {
    if (DisableInterleavedLoadCombine)
      return false;

    // Bail out when no TargetPassConfig is available (e.g. target-less
    // pipelines) — the impl needs a TargetMachine.
    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC)
      return false;

    LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName()
                      << "\n");

    return InterleavedLoadCombineImpl(
               F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
               getAnalysis<MemorySSAWrapperPass>().getMSSA(),
               TPC->getTM<TargetMachine>())
        .run();
  }

  // Declare the analyses the impl consumes (MemorySSA, DominatorTree).
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

private:
};
} // anonymous namespace
char InterleavedLoadCombine::ID = 0;

// Register the pass and its analysis dependencies with the legacy pass
// manager.
INITIALIZE_PASS_BEGIN(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
  1170. FunctionPass *
  1171. llvm::createInterleavedLoadCombinePass() {
  1172. auto P = new InterleavedLoadCombine();
  1173. return P;
  1174. }