InterleavedLoadCombinePass.cpp 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363
  1. //===- InterleavedLoadCombine.cpp - Combine Interleaved Loads ---*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // \file
  11. //
  12. // This file defines the interleaved-load-combine pass. The pass searches for
  13. // ShuffleVectorInst instructions that execute interleaving loads. If a matching
  14. // pattern is found, it adds a combined load and further instructions in a
  15. // pattern that is detectable by InterleavedAccessPass. The old instructions are
  16. // left dead to be removed later. The pass is specifically designed to be
  17. // executed just before InterleavedAccessPass to find any left-over instances
  18. // that are not detected within former passes.
  19. //
  20. //===----------------------------------------------------------------------===//
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <list>
  45. using namespace llvm;
  46. #define DEBUG_TYPE "interleaved-load-combine"
namespace {

/// Statistic counter
STATISTIC(NumInterleavedLoadCombine, "Number of combined loads");

/// Option to disable the pass
static cl::opt<bool> DisableInterleavedLoadCombine(
    "disable-" DEBUG_TYPE, cl::init(false), cl::Hidden,
    cl::desc("Disable combining of interleaved loads"));

/// Forward declaration; the full definition follows below.
struct VectorInfo;
/// Implementation of the interleaved-load-combine transformation. It is
/// constructed per function and driven by run().
struct InterleavedLoadCombineImpl {
public:
  InterleavedLoadCombineImpl(Function &F, DominatorTree &DT, MemorySSA &MSSA,
                             TargetMachine &TM)
      : F(F), DT(DT), MSSA(MSSA),
        TLI(*TM.getSubtargetImpl(F)->getTargetLowering()),
        TTI(TM.getTargetTransformInfo(F)) {}

  /// Scan the function for interleaved load candidates and execute the
  /// replacement if applicable.
  bool run();

private:
  /// Function this pass is working on
  Function &F;

  /// Dominator Tree Analysis
  DominatorTree &DT;

  /// Memory Alias Analyses
  MemorySSA &MSSA;

  /// Target Lowering Information
  const TargetLowering &TLI;

  /// Target Transform Information
  const TargetTransformInfo TTI;

  /// Find the instruction in sets LIs that dominates all others, return nullptr
  /// if there is none.
  LoadInst *findFirstLoad(const std::set<LoadInst *> &LIs);

  /// Replace interleaved load candidates. It does additional
  /// analyses if this makes sense. Returns true on success and false
  /// if nothing has been changed.
  bool combine(std::list<VectorInfo> &InterleavedLoad,
               OptimizationRemarkEmitter &ORE);

  /// Given a set of VectorInfo containing candidates for a given interleave
  /// factor, find a set that represents a 'factor' interleaved load.
  bool findPattern(std::list<VectorInfo> &Candidates,
                   std::list<VectorInfo> &InterleavedLoad, unsigned Factor,
                   const DataLayout &DL);
}; // InterleavedLoadCombine
  90. /// First Order Polynomial on an n-Bit Integer Value
  91. ///
  92. /// Polynomial(Value) = Value * B + A + E*2^(n-e)
  93. ///
  94. /// A and B are the coefficients. E*2^(n-e) is an error within 'e' most
  95. /// significant bits. It is introduced if an exact computation cannot be proven
  96. /// (e.q. division by 2).
  97. ///
  98. /// As part of this optimization multiple loads will be combined. It necessary
  99. /// to prove that loads are within some relative offset to each other. This
  100. /// class is used to prove relative offsets of values loaded from memory.
  101. ///
  102. /// Representing an integer in this form is sound since addition in two's
  103. /// complement is associative (trivial) and multiplication distributes over the
  104. /// addition (see Proof(1) in Polynomial::mul). Further, both operations
  105. /// commute.
  106. //
  107. // Example:
  108. // declare @fn(i64 %IDX, <4 x float>* %PTR) {
  109. // %Pa1 = add i64 %IDX, 2
  110. // %Pa2 = lshr i64 %Pa1, 1
  111. // %Pa3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pa2
  112. // %Va = load <4 x float>, <4 x float>* %Pa3
  113. //
  114. // %Pb1 = add i64 %IDX, 4
  115. // %Pb2 = lshr i64 %Pb1, 1
  116. // %Pb3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pb2
  117. // %Vb = load <4 x float>, <4 x float>* %Pb3
  118. // ... }
  119. //
  120. // The goal is to prove that two loads load consecutive addresses.
  121. //
  122. // In this case the polynomials are constructed by the following
  123. // steps.
  124. //
  125. // The number tag #e specifies the error bits.
  126. //
  127. // Pa_0 = %IDX #0
  128. // Pa_1 = %IDX + 2 #0 | add 2
  129. // Pa_2 = %IDX/2 + 1 #1 | lshr 1
  130. // Pa_3 = %IDX/2 + 1 #1 | GEP, step signext to i64
  131. // Pa_4 = (%IDX/2)*16 + 16 #0 | GEP, multiply index by sizeof(4) for floats
  132. // Pa_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  133. //
  134. // Pb_0 = %IDX #0
  135. //  Pb_1 = %IDX + 4            #0 | add 4
  136. // Pb_2 = %IDX/2 + 2 #1 | lshr 1
  137. // Pb_3 = %IDX/2 + 2 #1 | GEP, step signext to i64
  138. // Pb_4 = (%IDX/2)*16 + 32 #0 | GEP, multiply index by sizeof(4) for floats
  139. //  Pb_5 = (%IDX/2)*16 + 32    #0 | GEP, add offset of leading components
  140. //
  141. // Pb_5 - Pa_5 = 16 #0 | subtract to get the offset
  142. //
  143. // Remark: %PTR is not maintained within this class. So in this instance the
  144. // offset of 16 can only be assumed if the pointers are equal.
  145. //
  146. class Polynomial {
  147. /// Operations on B
  148. enum BOps {
  149. LShr,
  150. Mul,
  151. SExt,
  152. Trunc,
  153. };
  154. /// Number of Error Bits e
  155. unsigned ErrorMSBs;
  156. /// Value
  157. Value *V;
  158. /// Coefficient B
  159. SmallVector<std::pair<BOps, APInt>, 4> B;
  160. /// Coefficient A
  161. APInt A;
  162. public:
  163. Polynomial(Value *V) : ErrorMSBs((unsigned)-1), V(V), B(), A() {
  164. IntegerType *Ty = dyn_cast<IntegerType>(V->getType());
  165. if (Ty) {
  166. ErrorMSBs = 0;
  167. this->V = V;
  168. A = APInt(Ty->getBitWidth(), 0);
  169. }
  170. }
  171. Polynomial(const APInt &A, unsigned ErrorMSBs = 0)
  172. : ErrorMSBs(ErrorMSBs), V(NULL), B(), A(A) {}
  173. Polynomial(unsigned BitWidth, uint64_t A, unsigned ErrorMSBs = 0)
  174. : ErrorMSBs(ErrorMSBs), V(NULL), B(), A(BitWidth, A) {}
  175. Polynomial() : ErrorMSBs((unsigned)-1), V(NULL), B(), A() {}
  176. /// Increment and clamp the number of undefined bits.
  177. void incErrorMSBs(unsigned amt) {
  178. if (ErrorMSBs == (unsigned)-1)
  179. return;
  180. ErrorMSBs += amt;
  181. if (ErrorMSBs > A.getBitWidth())
  182. ErrorMSBs = A.getBitWidth();
  183. }
  184. /// Decrement and clamp the number of undefined bits.
  185. void decErrorMSBs(unsigned amt) {
  186. if (ErrorMSBs == (unsigned)-1)
  187. return;
  188. if (ErrorMSBs > amt)
  189. ErrorMSBs -= amt;
  190. else
  191. ErrorMSBs = 0;
  192. }
  193. /// Apply an add on the polynomial
  194. Polynomial &add(const APInt &C) {
  195. // Note: Addition is associative in two's complement even when in case of
  196. // signed overflow.
  197. //
  198. // Error bits can only propagate into higher significant bits. As these are
  199. // already regarded as undefined, there is no change.
  200. //
  201. // Theorem: Adding a constant to a polynomial does not change the error
  202. // term.
  203. //
  204. // Proof:
  205. //
  206. // Since the addition is associative and commutes:
  207. //
  208. // (B + A + E*2^(n-e)) + C = B + (A + C) + E*2^(n-e)
  209. // [qed]
  210. if (C.getBitWidth() != A.getBitWidth()) {
  211. ErrorMSBs = (unsigned)-1;
  212. return *this;
  213. }
  214. A += C;
  215. return *this;
  216. }
  217. /// Apply a multiplication onto the polynomial.
  218. Polynomial &mul(const APInt &C) {
  219. // Note: Multiplication distributes over the addition
  220. //
  221. // Theorem: Multiplication distributes over the addition
  222. //
  223. // Proof(1):
  224. //
  225. // (B+A)*C =-
  226. // = (B + A) + (B + A) + .. {C Times}
  227. // addition is associative and commutes, hence
  228. // = B + B + .. {C Times} .. + A + A + .. {C times}
  229. // = B*C + A*C
  230. // (see (function add) for signed values and overflows)
  231. // [qed]
  232. //
  233. // Theorem: If C has c trailing zeros, errors bits in A or B are shifted out
  234. // to the left.
  235. //
  236. // Proof(2):
  237. //
  238. // Let B' and A' be the n-Bit inputs with some unknown errors EA,
  239. // EB at e leading bits. B' and A' can be written down as:
  240. //
  241. // B' = B + 2^(n-e)*EB
  242. // A' = A + 2^(n-e)*EA
  243. //
  244. // Let C' be an input with c trailing zero bits. C' can be written as
  245. //
  246. // C' = C*2^c
  247. //
  248. // Therefore we can compute the result by using distributivity and
  249. // commutativity.
  250. //
  251. // (B'*C' + A'*C') = [B + 2^(n-e)*EB] * C' + [A + 2^(n-e)*EA] * C' =
  252. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  253. // = (B'+A') * C' =
  254. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  255. // = [B + A + 2^(n-e)*EB + 2^(n-e)*EA] * C' =
  256. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C' =
  257. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C*2^c =
  258. // = (B + A) * C' + C*(EB + EA)*2^(n-e)*2^c =
  259. //
  260. // Let EC be the final error with EC = C*(EB + EA)
  261. //
  262. // = (B + A)*C' + EC*2^(n-e)*2^c =
  263. // = (B + A)*C' + EC*2^(n-(e-c))
  264. //
  265. // Since EC is multiplied by 2^(n-(e-c)) the resulting error contains c
  266. // less error bits than the input. c bits are shifted out to the left.
  267. // [qed]
  268. if (C.getBitWidth() != A.getBitWidth()) {
  269. ErrorMSBs = (unsigned)-1;
  270. return *this;
  271. }
  272. // Multiplying by one is a no-op.
  273. if (C.isOneValue()) {
  274. return *this;
  275. }
  276. // Multiplying by zero removes the coefficient B and defines all bits.
  277. if (C.isNullValue()) {
  278. ErrorMSBs = 0;
  279. deleteB();
  280. }
  281. // See Proof(2): Trailing zero bits indicate a left shift. This removes
  282. // leading bits from the result even if they are undefined.
  283. decErrorMSBs(C.countTrailingZeros());
  284. A *= C;
  285. pushBOperation(Mul, C);
  286. return *this;
  287. }
  288. /// Apply a logical shift right on the polynomial
  289. Polynomial &lshr(const APInt &C) {
  290. // Theorem(1): (B + A + E*2^(n-e)) >> 1 => (B >> 1) + (A >> 1) + E'*2^(n-e')
  291. // where
  292. // e' = e + 1,
  293. // E is a e-bit number,
  294. // E' is a e'-bit number,
  295. // holds under the following precondition:
  296. // pre(1): A % 2 = 0
  297. // pre(2): e < n, (see Theorem(2) for the trivial case with e=n)
  298. // where >> expresses a logical shift to the right, with adding zeros.
  299. //
  300. // We need to show that for every, E there is a E'
  301. //
  302. // B = b_h * 2^(n-1) + b_m * 2 + b_l
  303. // A = a_h * 2^(n-1) + a_m * 2 (pre(1))
  304. //
  305. // where a_h, b_h, b_l are single bits, and a_m, b_m are (n-2) bit numbers
  306. //
  307. // Let X = (B + A + E*2^(n-e)) >> 1
  308. // Let Y = (B >> 1) + (A >> 1) + E*2^(n-e) >> 1
  309. //
  310. // X = [B + A + E*2^(n-e)] >> 1 =
  311. // = [ b_h * 2^(n-1) + b_m * 2 + b_l +
  312. // + a_h * 2^(n-1) + a_m * 2 +
  313. // + E * 2^(n-e) ] >> 1 =
  314. //
  315. // The sum is built by putting the overflow of [a_m + b+n] into the term
  316. // 2^(n-1). As there are no more bits beyond 2^(n-1) the overflow within
  317. // this bit is discarded. This is expressed by % 2.
  318. //
  319. // The bit in position 0 cannot overflow into the term (b_m + a_m).
  320. //
  321. // = [ ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-1) +
  322. // + ((b_m + a_m) % 2^(n-2)) * 2 +
  323. // + b_l + E * 2^(n-e) ] >> 1 =
  324. //
  325. // The shift is computed by dividing the terms by 2 and by cutting off
  326. // b_l.
  327. //
  328. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  329. // + ((b_m + a_m) % 2^(n-2)) +
  330. // + E * 2^(n-(e+1)) =
  331. //
  332. // by the definition in the Theorem e+1 = e'
  333. //
  334. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  335. // + ((b_m + a_m) % 2^(n-2)) +
  336. // + E * 2^(n-e') =
  337. //
  338. // Compute Y by applying distributivity first
  339. //
  340. // Y = (B >> 1) + (A >> 1) + E*2^(n-e') =
  341. // = (b_h * 2^(n-1) + b_m * 2 + b_l) >> 1 +
  342. // + (a_h * 2^(n-1) + a_m * 2) >> 1 +
  343. // + E * 2^(n-e) >> 1 =
  344. //
  345. // Again, the shift is computed by dividing the terms by 2 and by cutting
  346. // off b_l.
  347. //
  348. // = b_h * 2^(n-2) + b_m +
  349. // + a_h * 2^(n-2) + a_m +
  350. // + E * 2^(n-(e+1)) =
  351. //
  352. // Again, the sum is built by putting the overflow of [a_m + b+n] into
  353. // the term 2^(n-1). But this time there is room for a second bit in the
  354. // term 2^(n-2) we add this bit to a new term and denote it o_h in a
  355. // second step.
  356. //
  357. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] >> 1) * 2^(n-1) +
  358. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  359. // + ((b_m + a_m) % 2^(n-2)) +
  360. // + E * 2^(n-(e+1)) =
  361. //
  362. // Let o_h = [b_h + a_h + (b_m + a_m) >> (n-2)] >> 1
  363. // Further replace e+1 by e'.
  364. //
  365. // = o_h * 2^(n-1) +
  366. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  367. // + ((b_m + a_m) % 2^(n-2)) +
  368. // + E * 2^(n-e') =
  369. //
  370. // Move o_h into the error term and construct E'. To ensure that there is
  371. // no 2^x with negative x, this step requires pre(2) (e < n).
  372. //
  373. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  374. // + ((b_m + a_m) % 2^(n-2)) +
  375. // + o_h * 2^(e'-1) * 2^(n-e') + | pre(2), move 2^(e'-1)
  376. // | out of the old exponent
  377. // + E * 2^(n-e') =
  378. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  379. // + ((b_m + a_m) % 2^(n-2)) +
  380. // + [o_h * 2^(e'-1) + E] * 2^(n-e') + | move 2^(e'-1) out of
  381. // | the old exponent
  382. //
  383. // Let E' = o_h * 2^(e'-1) + E
  384. //
  385. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  386. // + ((b_m + a_m) % 2^(n-2)) +
  387. // + E' * 2^(n-e')
  388. //
  389. // Because X and Y are distinct only in there error terms and E' can be
  390. // constructed as shown the theorem holds.
  391. // [qed]
  392. //
  393. // For completeness in case of the case e=n it is also required to show that
  394. // distributivity can be applied.
  395. //
  396. // In this case Theorem(1) transforms to (the pre-condition on A can also be
  397. // dropped)
  398. //
  399. // Theorem(2): (B + A + E) >> 1 => (B >> 1) + (A >> 1) + E'
  400. // where
  401. // A, B, E, E' are two's complement numbers with the same bit
  402. // width
  403. //
  404. // Let A + B + E = X
  405. // Let (B >> 1) + (A >> 1) = Y
  406. //
  407. // Therefore we need to show that for every X and Y there is an E' which
  408. // makes the equation
  409. //
  410. // X = Y + E'
  411. //
  412. // hold. This is trivially the case for E' = X - Y.
  413. //
  414. // [qed]
  415. //
  416. // Remark: Distributing lshr with and arbitrary number n can be expressed as
  417. // ((((B + A) lshr 1) lshr 1) ... ) {n times}.
  418. // This construction induces n additional error bits at the left.
  419. if (C.getBitWidth() != A.getBitWidth()) {
  420. ErrorMSBs = (unsigned)-1;
  421. return *this;
  422. }
  423. if (C.isNullValue())
  424. return *this;
  425. // Test if the result will be zero
  426. unsigned shiftAmt = C.getZExtValue();
  427. if (shiftAmt >= C.getBitWidth())
  428. return mul(APInt(C.getBitWidth(), 0));
  429. // The proof that shiftAmt LSBs are zero for at least one summand is only
  430. // possible for the constant number.
  431. //
  432. // If this can be proven add shiftAmt to the error counter
  433. // `ErrorMSBs`. Otherwise set all bits as undefined.
  434. if (A.countTrailingZeros() < shiftAmt)
  435. ErrorMSBs = A.getBitWidth();
  436. else
  437. incErrorMSBs(shiftAmt);
  438. // Apply the operation.
  439. pushBOperation(LShr, C);
  440. A = A.lshr(shiftAmt);
  441. return *this;
  442. }
  443. /// Apply a sign-extend or truncate operation on the polynomial.
  444. Polynomial &sextOrTrunc(unsigned n) {
  445. if (n < A.getBitWidth()) {
  446. // Truncate: Clearly undefined Bits on the MSB side are removed
  447. // if there are any.
  448. decErrorMSBs(A.getBitWidth() - n);
  449. A = A.trunc(n);
  450. pushBOperation(Trunc, APInt(sizeof(n) * 8, n));
  451. }
  452. if (n > A.getBitWidth()) {
  453. // Extend: Clearly extending first and adding later is different
  454. // to adding first and extending later in all extended bits.
  455. incErrorMSBs(n - A.getBitWidth());
  456. A = A.sext(n);
  457. pushBOperation(SExt, APInt(sizeof(n) * 8, n));
  458. }
  459. return *this;
  460. }
  461. /// Test if there is a coefficient B.
  462. bool isFirstOrder() const { return V != nullptr; }
  463. /// Test coefficient B of two Polynomials are equal.
  464. bool isCompatibleTo(const Polynomial &o) const {
  465. // The polynomial use different bit width.
  466. if (A.getBitWidth() != o.A.getBitWidth())
  467. return false;
  468. // If neither Polynomial has the Coefficient B.
  469. if (!isFirstOrder() && !o.isFirstOrder())
  470. return true;
  471. // The index variable is different.
  472. if (V != o.V)
  473. return false;
  474. // Check the operations.
  475. if (B.size() != o.B.size())
  476. return false;
  477. auto ob = o.B.begin();
  478. for (auto &b : B) {
  479. if (b != *ob)
  480. return false;
  481. ob++;
  482. }
  483. return true;
  484. }
  485. /// Subtract two polynomials, return an undefined polynomial if
  486. /// subtraction is not possible.
  487. Polynomial operator-(const Polynomial &o) const {
  488. // Return an undefined polynomial if incompatible.
  489. if (!isCompatibleTo(o))
  490. return Polynomial();
  491. // If the polynomials are compatible (meaning they have the same
  492. // coefficient on B), B is eliminated. Thus a polynomial solely
  493. // containing A is returned
  494. return Polynomial(A - o.A, std::max(ErrorMSBs, o.ErrorMSBs));
  495. }
  496. /// Subtract a constant from a polynomial,
  497. Polynomial operator-(uint64_t C) const {
  498. Polynomial Result(*this);
  499. Result.A -= C;
  500. return Result;
  501. }
  502. /// Add a constant to a polynomial,
  503. Polynomial operator+(uint64_t C) const {
  504. Polynomial Result(*this);
  505. Result.A += C;
  506. return Result;
  507. }
  508. /// Returns true if it can be proven that two Polynomials are equal.
  509. bool isProvenEqualTo(const Polynomial &o) {
  510. // Subtract both polynomials and test if it is fully defined and zero.
  511. Polynomial r = *this - o;
  512. return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isNullValue());
  513. }
  514. /// Print the polynomial into a stream.
  515. void print(raw_ostream &OS) const {
  516. OS << "[{#ErrBits:" << ErrorMSBs << "} ";
  517. if (V) {
  518. for (auto b : B)
  519. OS << "(";
  520. OS << "(" << *V << ") ";
  521. for (auto b : B) {
  522. switch (b.first) {
  523. case LShr:
  524. OS << "LShr ";
  525. break;
  526. case Mul:
  527. OS << "Mul ";
  528. break;
  529. case SExt:
  530. OS << "SExt ";
  531. break;
  532. case Trunc:
  533. OS << "Trunc ";
  534. break;
  535. }
  536. OS << b.second << ") ";
  537. }
  538. }
  539. OS << "+ " << A << "]";
  540. }
  541. private:
  542. void deleteB() {
  543. V = nullptr;
  544. B.clear();
  545. }
  546. void pushBOperation(const BOps Op, const APInt &C) {
  547. if (isFirstOrder()) {
  548. B.push_back(std::make_pair(Op, C));
  549. return;
  550. }
  551. }
  552. };
/// Stream a textual representation of a Polynomial (see Polynomial::print).
static raw_ostream &operator<<(raw_ostream &OS, const Polynomial &P) {
  P.print(OS);
  return OS;
}
  557. /// VectorInfo abstractly stores the following information for each vector
  558. /// element:
  559. ///
  560. /// 1) the memory address loaded into the element as a Polynomial,
  561. /// 2) a set of load instructions necessary to construct the vector,
  562. /// 3) a set of all other instructions that are necessary to create the vector and
  563. /// 4) a pointer value that can be used as relative base for all elements.
  564. struct VectorInfo {
  565. private:
  // Deliberately trapping: VectorInfo owns the EI array, so a real copy would
  // require a deep copy. Reaching this constructor at runtime is a bug.
  VectorInfo(const VectorInfo &c) : VTy(c.VTy) {
    llvm_unreachable(
        "Copying VectorInfo is neither implemented nor necessary,");
  }
  570. public:
  /// Information of a Vector Element
  struct ElementInfo {
    /// Offset Polynomial.
    Polynomial Ofs;

    /// The Load Instruction used to Load the entry. LI is null if the pointer
    /// of the load instruction does not point on to the entry
    LoadInst *LI;

    /// Default-constructed entries represent an unknown/undefined element.
    ElementInfo(Polynomial Offset = Polynomial(), LoadInst *LI = nullptr)
        : Ofs(Offset), LI(LI) {}
  };
  /// Basic-block the load instructions are within
  BasicBlock *BB;

  /// Pointer value of all participation load instructions
  Value *PV;

  /// Participating load instructions
  std::set<LoadInst *> LIs;

  /// Participating instructions (loads, shuffles, bitcasts) that become dead
  /// once a combined load replaces this vector
  std::set<Instruction *> Is;

  /// Final shuffle-vector instruction
  ShuffleVectorInst *SVI;

  /// Information of the offset for each vector element; owned array of
  /// getDimension() entries, released in the destructor
  ElementInfo *EI;

  /// Vector Type
  VectorType *const VTy;
  VectorInfo(VectorType *VTy)
      : BB(nullptr), PV(nullptr), LIs(), Is(), SVI(nullptr), VTy(VTy) {
    // One ElementInfo slot per vector lane; freed in the destructor.
    EI = new ElementInfo[VTy->getNumElements()];
  }

  virtual ~VectorInfo() { delete[] EI; }

  /// Number of lanes of the tracked vector type.
  unsigned getDimension() const { return VTy->getNumElements(); }
  601. /// Test if the VectorInfo can be part of an interleaved load with the
  602. /// specified factor.
  603. ///
  604. /// \param Factor of the interleave
  605. /// \param DL Targets Datalayout
  606. ///
  607. /// \returns true if this is possible and false if not
  608. bool isInterleaved(unsigned Factor, const DataLayout &DL) const {
  609. unsigned Size = DL.getTypeAllocSize(VTy->getElementType());
  610. for (unsigned i = 1; i < getDimension(); i++) {
  611. if (!EI[i].Ofs.isProvenEqualTo(EI[0].Ofs + i * Factor * Size)) {
  612. return false;
  613. }
  614. }
  615. return true;
  616. }
  617. /// Recursively computes the vector information stored in V.
  618. ///
  619. /// This function delegates the work to specialized implementations
  620. ///
  621. /// \param V Value to operate on
  622. /// \param Result Result of the computation
  623. ///
  624. /// \returns false if no sensible information can be gathered.
  625. static bool compute(Value *V, VectorInfo &Result, const DataLayout &DL) {
  626. ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  627. if (SVI)
  628. return computeFromSVI(SVI, Result, DL);
  629. LoadInst *LI = dyn_cast<LoadInst>(V);
  630. if (LI)
  631. return computeFromLI(LI, Result, DL);
  632. BitCastInst *BCI = dyn_cast<BitCastInst>(V);
  633. if (BCI)
  634. return computeFromBCI(BCI, Result, DL);
  635. return false;
  636. }
  637. /// BitCastInst specialization to compute the vector information.
  638. ///
  639. /// \param BCI BitCastInst to operate on
  640. /// \param Result Result of the computation
  641. ///
  642. /// \returns false if no sensible information can be gathered.
  643. static bool computeFromBCI(BitCastInst *BCI, VectorInfo &Result,
  644. const DataLayout &DL) {
  645. Instruction *Op = dyn_cast<Instruction>(BCI->getOperand(0));
  646. if (!Op)
  647. return false;
  648. VectorType *VTy = dyn_cast<VectorType>(Op->getType());
  649. if (!VTy)
  650. return false;
  651. // We can only cast from large to smaller vectors
  652. if (Result.VTy->getNumElements() % VTy->getNumElements())
  653. return false;
  654. unsigned Factor = Result.VTy->getNumElements() / VTy->getNumElements();
  655. unsigned NewSize = DL.getTypeAllocSize(Result.VTy->getElementType());
  656. unsigned OldSize = DL.getTypeAllocSize(VTy->getElementType());
  657. if (NewSize * Factor != OldSize)
  658. return false;
  659. VectorInfo Old(VTy);
  660. if (!compute(Op, Old, DL))
  661. return false;
  662. for (unsigned i = 0; i < Result.VTy->getNumElements(); i += Factor) {
  663. for (unsigned j = 0; j < Factor; j++) {
  664. Result.EI[i + j] =
  665. ElementInfo(Old.EI[i / Factor].Ofs + j * NewSize,
  666. j == 0 ? Old.EI[i / Factor].LI : nullptr);
  667. }
  668. }
  669. Result.BB = Old.BB;
  670. Result.PV = Old.PV;
  671. Result.LIs.insert(Old.LIs.begin(), Old.LIs.end());
  672. Result.Is.insert(Old.Is.begin(), Old.Is.end());
  673. Result.Is.insert(BCI);
  674. Result.SVI = nullptr;
  675. return true;
  676. }
  677. /// ShuffleVectorInst specialization to compute vector information.
  678. ///
  679. /// \param SVI ShuffleVectorInst to operate on
  680. /// \param Result Result of the computation
  681. ///
  682. /// Compute the left and the right side vector information and merge them by
  683. /// applying the shuffle operation. This function also ensures that the left
  684. /// and right side have compatible loads. This means that all loads are with
  685. /// in the same basic block and are based on the same pointer.
  686. ///
  687. /// \returns false if no sensible information can be gathered.
  688. static bool computeFromSVI(ShuffleVectorInst *SVI, VectorInfo &Result,
  689. const DataLayout &DL) {
  690. VectorType *ArgTy = dyn_cast<VectorType>(SVI->getOperand(0)->getType());
  691. assert(ArgTy && "ShuffleVector Operand is not a VectorType");
  692. // Compute the left hand vector information.
  693. VectorInfo LHS(ArgTy);
  694. if (!compute(SVI->getOperand(0), LHS, DL))
  695. LHS.BB = nullptr;
  696. // Compute the right hand vector information.
  697. VectorInfo RHS(ArgTy);
  698. if (!compute(SVI->getOperand(1), RHS, DL))
  699. RHS.BB = nullptr;
  700. // Neither operand produced sensible results?
  701. if (!LHS.BB && !RHS.BB)
  702. return false;
  703. // Only RHS produced sensible results?
  704. else if (!LHS.BB) {
  705. Result.BB = RHS.BB;
  706. Result.PV = RHS.PV;
  707. }
  708. // Only LHS produced sensible results?
  709. else if (!RHS.BB) {
  710. Result.BB = LHS.BB;
  711. Result.PV = LHS.PV;
  712. }
  713. // Both operands produced sensible results?
  714. else if ((LHS.BB == RHS.BB) && (LHS.PV == LHS.PV)) {
  715. Result.BB = LHS.BB;
  716. Result.PV = LHS.PV;
  717. }
  718. // Both operands produced sensible results but they are incompatible.
  719. else {
  720. return false;
  721. }
  722. // Merge and apply the operation on the offset information.
  723. if (LHS.BB) {
  724. Result.LIs.insert(LHS.LIs.begin(), LHS.LIs.end());
  725. Result.Is.insert(LHS.Is.begin(), LHS.Is.end());
  726. }
  727. if (RHS.BB) {
  728. Result.LIs.insert(RHS.LIs.begin(), RHS.LIs.end());
  729. Result.Is.insert(RHS.Is.begin(), RHS.Is.end());
  730. }
  731. Result.Is.insert(SVI);
  732. Result.SVI = SVI;
  733. int j = 0;
  734. for (int i : SVI->getShuffleMask()) {
  735. assert((i < 2 * (signed)ArgTy->getNumElements()) &&
  736. "Invalid ShuffleVectorInst (index out of bounds)");
  737. if (i < 0)
  738. Result.EI[j] = ElementInfo();
  739. else if (i < (signed)ArgTy->getNumElements()) {
  740. if (LHS.BB)
  741. Result.EI[j] = LHS.EI[i];
  742. else
  743. Result.EI[j] = ElementInfo();
  744. } else {
  745. if (RHS.BB)
  746. Result.EI[j] = RHS.EI[i - ArgTy->getNumElements()];
  747. else
  748. Result.EI[j] = ElementInfo();
  749. }
  750. j++;
  751. }
  752. return true;
  753. }
  754. /// LoadInst specialization to compute vector information.
  755. ///
  756. /// This function also acts as abort condition to the recursion.
  757. ///
  758. /// \param LI LoadInst to operate on
  759. /// \param Result Result of the computation
  760. ///
  761. /// \returns false if no sensible information can be gathered.
  762. static bool computeFromLI(LoadInst *LI, VectorInfo &Result,
  763. const DataLayout &DL) {
  764. Value *BasePtr;
  765. Polynomial Offset;
  766. if (LI->isVolatile())
  767. return false;
  768. if (LI->isAtomic())
  769. return false;
  770. // Get the base polynomial
  771. computePolynomialFromPointer(*LI->getPointerOperand(), Offset, BasePtr, DL);
  772. Result.BB = LI->getParent();
  773. Result.PV = BasePtr;
  774. Result.LIs.insert(LI);
  775. Result.Is.insert(LI);
  776. for (unsigned i = 0; i < Result.getDimension(); i++) {
  777. Value *Idx[2] = {
  778. ConstantInt::get(Type::getInt32Ty(LI->getContext()), 0),
  779. ConstantInt::get(Type::getInt32Ty(LI->getContext()), i),
  780. };
  781. int64_t Ofs = DL.getIndexedOffsetInType(Result.VTy, makeArrayRef(Idx, 2));
  782. Result.EI[i] = ElementInfo(Offset + Ofs, i == 0 ? LI : nullptr);
  783. }
  784. return true;
  785. }
  786. /// Recursively compute polynomial of a value.
  787. ///
  788. /// \param BO Input binary operation
  789. /// \param Result Result polynomial
  790. static void computePolynomialBinOp(BinaryOperator &BO, Polynomial &Result) {
  791. Value *LHS = BO.getOperand(0);
  792. Value *RHS = BO.getOperand(1);
  793. // Find the RHS Constant if any
  794. ConstantInt *C = dyn_cast<ConstantInt>(RHS);
  795. if ((!C) && BO.isCommutative()) {
  796. C = dyn_cast<ConstantInt>(LHS);
  797. if (C)
  798. std::swap(LHS, RHS);
  799. }
  800. switch (BO.getOpcode()) {
  801. case Instruction::Add:
  802. if (!C)
  803. break;
  804. computePolynomial(*LHS, Result);
  805. Result.add(C->getValue());
  806. return;
  807. case Instruction::LShr:
  808. if (!C)
  809. break;
  810. computePolynomial(*LHS, Result);
  811. Result.lshr(C->getValue());
  812. return;
  813. default:
  814. break;
  815. }
  816. Result = Polynomial(&BO);
  817. }
  818. /// Recursively compute polynomial of a value
  819. ///
  820. /// \param V input value
  821. /// \param Result result polynomial
  822. static void computePolynomial(Value &V, Polynomial &Result) {
  823. if (isa<BinaryOperator>(&V))
  824. computePolynomialBinOp(*dyn_cast<BinaryOperator>(&V), Result);
  825. else
  826. Result = Polynomial(&V);
  827. }
  828. /// Compute the Polynomial representation of a Pointer type.
  829. ///
  830. /// \param Ptr input pointer value
  831. /// \param Result result polynomial
  832. /// \param BasePtr pointer the polynomial is based on
  833. /// \param DL Datalayout of the target machine
  834. static void computePolynomialFromPointer(Value &Ptr, Polynomial &Result,
  835. Value *&BasePtr,
  836. const DataLayout &DL) {
  837. // Not a pointer type? Return an undefined polynomial
  838. PointerType *PtrTy = dyn_cast<PointerType>(Ptr.getType());
  839. if (!PtrTy) {
  840. Result = Polynomial();
  841. BasePtr = nullptr;
  842. }
  843. unsigned PointerBits =
  844. DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace());
  845. /// Skip pointer casts. Return Zero polynomial otherwise
  846. if (isa<CastInst>(&Ptr)) {
  847. CastInst &CI = *cast<CastInst>(&Ptr);
  848. switch (CI.getOpcode()) {
  849. case Instruction::BitCast:
  850. computePolynomialFromPointer(*CI.getOperand(0), Result, BasePtr, DL);
  851. break;
  852. default:
  853. BasePtr = &Ptr;
  854. Polynomial(PointerBits, 0);
  855. break;
  856. }
  857. }
  858. /// Resolve GetElementPtrInst.
  859. else if (isa<GetElementPtrInst>(&Ptr)) {
  860. GetElementPtrInst &GEP = *cast<GetElementPtrInst>(&Ptr);
  861. APInt BaseOffset(PointerBits, 0);
  862. // Check if we can compute the Offset with accumulateConstantOffset
  863. if (GEP.accumulateConstantOffset(DL, BaseOffset)) {
  864. Result = Polynomial(BaseOffset);
  865. BasePtr = GEP.getPointerOperand();
  866. return;
  867. } else {
  868. // Otherwise we allow that the last index operand of the GEP is
  869. // non-constant.
  870. unsigned idxOperand, e;
  871. SmallVector<Value *, 4> Indices;
  872. for (idxOperand = 1, e = GEP.getNumOperands(); idxOperand < e;
  873. idxOperand++) {
  874. ConstantInt *IDX = dyn_cast<ConstantInt>(GEP.getOperand(idxOperand));
  875. if (!IDX)
  876. break;
  877. Indices.push_back(IDX);
  878. }
  879. // It must also be the last operand.
  880. if (idxOperand + 1 != e) {
  881. Result = Polynomial();
  882. BasePtr = nullptr;
  883. return;
  884. }
  885. // Compute the polynomial of the index operand.
  886. computePolynomial(*GEP.getOperand(idxOperand), Result);
  887. // Compute base offset from zero based index, excluding the last
  888. // variable operand.
  889. BaseOffset =
  890. DL.getIndexedOffsetInType(GEP.getSourceElementType(), Indices);
  891. // Apply the operations of GEP to the polynomial.
  892. unsigned ResultSize = DL.getTypeAllocSize(GEP.getResultElementType());
  893. Result.sextOrTrunc(PointerBits);
  894. Result.mul(APInt(PointerBits, ResultSize));
  895. Result.add(BaseOffset);
  896. BasePtr = GEP.getPointerOperand();
  897. }
  898. }
  899. // All other instructions are handled by using the value as base pointer and
  900. // a zero polynomial.
  901. else {
  902. BasePtr = &Ptr;
  903. Polynomial(DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace()), 0);
  904. }
  905. }
  906. #ifndef NDEBUG
  907. void print(raw_ostream &OS) const {
  908. if (PV)
  909. OS << *PV;
  910. else
  911. OS << "(none)";
  912. OS << " + ";
  913. for (unsigned i = 0; i < getDimension(); i++)
  914. OS << ((i == 0) ? "[" : ", ") << EI[i].Ofs;
  915. OS << "]";
  916. }
  917. #endif
  918. };
#ifndef NDEBUG
/// Debugging-only stream operator; forwards to VectorInfo::print().
static raw_ostream &operator<<(raw_ostream &OS, const VectorInfo &S) {
  S.print(OS);
  return OS;
}
#endif
  925. } // anonymous namespace
// Search \p Candidates for a group of \p Factor VectorInfos that together
// form one interleaved load: same vector type, basic block and base pointer,
// with lane-0 offsets spaced exactly one element apart. On success the group
// is spliced into \p InterleavedLoad and true is returned.
bool InterleavedLoadCombineImpl::findPattern(
    std::list<VectorInfo> &Candidates, std::list<VectorInfo> &InterleavedLoad,
    unsigned Factor, const DataLayout &DL) {
  for (auto C0 = Candidates.begin(), E0 = Candidates.end(); C0 != E0; ++C0) {
    unsigned i;
    // Try to find an interleaved load using the front of Worklist as first
    // line. Size is the byte stride between consecutive interleave lines.
    unsigned Size = DL.getTypeAllocSize(C0->VTy->getElementType());

    // List containing iterators pointing to the VectorInfos of the candidates
    // (Res[0] stays Candidates.end() until a full group is found — that is
    // the "found" flag tested after the loop).
    std::vector<std::list<VectorInfo>::iterator> Res(Factor, Candidates.end());

    for (auto C = Candidates.begin(), E = Candidates.end(); C != E; C++) {
      // Group members must agree on type, block and base pointer with C0.
      if (C->VTy != C0->VTy)
        continue;
      if (C->BB != C0->BB)
        continue;
      if (C->PV != C0->PV)
        continue;

      // Check the current value matches any of factor - 1 remaining lines:
      // line i starts i * Size bytes after C0's first element.
      for (i = 1; i < Factor; i++) {
        if (C->EI[0].Ofs.isProvenEqualTo(C0->EI[0].Ofs + i * Size)) {
          Res[i] = C;
        }
      }

      // All of lines 1..Factor-1 filled in?
      for (i = 1; i < Factor; i++) {
        if (Res[i] == Candidates.end())
          break;
      }

      if (i == Factor) {
        Res[0] = C0;
        break;
      }
    }

    if (Res[0] != Candidates.end()) {
      // Move the result into the output (splice preserves the iterators'
      // validity since std::list nodes are relinked, not copied).
      for (unsigned i = 0; i < Factor; i++) {
        InterleavedLoad.splice(InterleavedLoad.end(), Candidates, Res[i]);
      }

      return true;
    }
  }
  return false;
}
  967. LoadInst *
  968. InterleavedLoadCombineImpl::findFirstLoad(const std::set<LoadInst *> &LIs) {
  969. assert(!LIs.empty() && "No load instructions given.");
  970. // All LIs are within the same BB. Select the first for a reference.
  971. BasicBlock *BB = (*LIs.begin())->getParent();
  972. BasicBlock::iterator FLI =
  973. std::find_if(BB->begin(), BB->end(), [&LIs](Instruction &I) -> bool {
  974. return is_contained(LIs, &I);
  975. });
  976. assert(FLI != BB->end());
  977. return cast<LoadInst>(FLI);
  978. }
// Replace one group of interleaved loads+shuffles with a single wide load
// followed by strided shufflevectors, when legality (MemorySSA / dominance)
// and TTI cost checks allow it. Returns true if the IR was changed.
bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
                                         OptimizationRemarkEmitter &ORE) {
  LLVM_DEBUG(dbgs() << "Checking interleaved load\n");

  // The insertion point is the LoadInst which loads the first values. The
  // following tests are used to prove that the combined load can be inserted
  // just before InsertionPoint.
  LoadInst *InsertionPoint = InterleavedLoad.front().EI[0].LI;

  // Test if the offset is computed
  if (!InsertionPoint)
    return false;

  std::set<LoadInst *> LIs;
  std::set<Instruction *> Is;
  std::set<Instruction *> SVIs;

  unsigned InterleavedCost;
  unsigned InstructionCost = 0;

  // Get the interleave factor
  unsigned Factor = InterleavedLoad.size();

  // Merge all input sets used in analysis
  for (auto &VI : InterleavedLoad) {
    // Generate a set of all load instructions to be combined
    LIs.insert(VI.LIs.begin(), VI.LIs.end());

    // Generate a set of all instructions taking part in load
    // interleaved. This list excludes the instructions necessary for the
    // polynomial construction.
    Is.insert(VI.Is.begin(), VI.Is.end());

    // Generate the set of the final ShuffleVectorInst.
    SVIs.insert(VI.SVI);
  }

  // There is nothing to combine.
  if (LIs.size() < 2)
    return false;

  // Test if all participating instruction will be dead after the
  // transformation. If intermediate results are used, no performance gain can
  // be expected. Also sum the cost of the Instructions being left dead.
  for (auto &I : Is) {
    // Compute the old cost
    InstructionCost +=
        TTI.getInstructionCost(I, TargetTransformInfo::TCK_Latency);

    // The final SVIs are allowed not to be dead, all uses will be replaced
    if (SVIs.find(I) != SVIs.end())
      continue;

    // If there are users outside the set to be eliminated, we abort the
    // transformation. No gain can be expected.
    for (const auto &U : I->users()) {
      if (Is.find(dyn_cast<Instruction>(U)) == Is.end())
        return false;
    }
  }

  // We know that all LoadInst are within the same BB. This guarantees that
  // either everything or nothing is loaded.
  LoadInst *First = findFirstLoad(LIs);

  // To be safe that the loads can be combined, iterate over all loads and
  // test that the corresponding defining access dominates first LI. This
  // guarantees that there are no aliasing stores in between the loads.
  auto FMA = MSSA.getMemoryAccess(First);
  for (auto LI : LIs) {
    auto MADef = MSSA.getMemoryAccess(LI)->getDefiningAccess();
    if (!MSSA.dominates(MADef, FMA))
      return false;
  }
  assert(!LIs.empty() && "There are no LoadInst to combine");

  // It is necessary that insertion point dominates all final
  // ShuffleVectorInst.
  for (auto &VI : InterleavedLoad) {
    if (!DT.dominates(InsertionPoint, VI.SVI))
      return false;
  }

  // All checks are done. Add instructions detectable by InterleavedAccessPass
  // The old instructions are left dead (to be cleaned up by later passes).
  IRBuilder<> Builder(InsertionPoint);
  Type *ETy = InterleavedLoad.front().SVI->getType()->getElementType();
  unsigned ElementsPerSVI =
      InterleavedLoad.front().SVI->getType()->getNumElements();
  VectorType *ILTy = VectorType::get(ETy, Factor * ElementsPerSVI);

  // Query the target cost of one interleaved load of all Factor indices and
  // bail out unless it beats the summed cost of the replaced instructions.
  SmallVector<unsigned, 4> Indices;
  for (unsigned i = 0; i < Factor; i++)
    Indices.push_back(i);
  InterleavedCost = TTI.getInterleavedMemoryOpCost(
      Instruction::Load, ILTy, Factor, Indices, InsertionPoint->getAlignment(),
      InsertionPoint->getPointerAddressSpace());

  if (InterleavedCost >= InstructionCost) {
    return false;
  }

  // Create a pointer cast for the wide load.
  auto CI = Builder.CreatePointerCast(InsertionPoint->getOperand(0),
                                      ILTy->getPointerTo(),
                                      "interleaved.wide.ptrcast");

  // Create the wide load and update the MemorySSA.
  auto LI = Builder.CreateAlignedLoad(CI, InsertionPoint->getAlignment(),
                                      "interleaved.wide.load");
  auto MSSAU = MemorySSAUpdater(&MSSA);
  MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
      LI, nullptr, MSSA.getMemoryAccess(InsertionPoint)));
  MSSAU.insertUse(MSSALoad);

  // Create the final SVIs and replace all uses. Shuffle i extracts lanes
  // i, i+Factor, i+2*Factor, ... from the wide load.
  int i = 0;
  for (auto &VI : InterleavedLoad) {
    SmallVector<uint32_t, 4> Mask;
    for (unsigned j = 0; j < ElementsPerSVI; j++)
      Mask.push_back(i + j * Factor);

    Builder.SetInsertPoint(VI.SVI);
    auto SVI = Builder.CreateShuffleVector(LI, UndefValue::get(LI->getType()),
                                           Mask, "interleaved.shuffle");
    VI.SVI->replaceAllUsesWith(SVI);
    i++;
  }

  NumInterleavedLoadCombine++;
  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Combined Interleaved Load", LI)
           << "Load interleaved combined with factor "
           << ore::NV("Factor", Factor);
  });

  return true;
}
// Driver: gather interleaved-load candidates per factor (highest factor
// first) and combine every matching group found by findPattern().
bool InterleavedLoadCombineImpl::run() {
  OptimizationRemarkEmitter ORE(&F);
  bool changed = false;
  unsigned MaxFactor = TLI.getMaxSupportedInterleaveFactor();
  auto &DL = F.getParent()->getDataLayout();

  // Start with the highest factor to avoid combining and recombining.
  for (unsigned Factor = MaxFactor; Factor >= 2; Factor--) {
    std::list<VectorInfo> Candidates;

    // Collect all shufflevectors whose offset polynomials form an
    // interleaved pattern for this factor.
    for (BasicBlock &BB : F) {
      for (Instruction &I : BB) {
        if (auto SVI = dyn_cast<ShuffleVectorInst>(&I)) {
          Candidates.emplace_back(SVI->getType());

          // Drop the slot again if no vector information can be computed.
          if (!VectorInfo::computeFromSVI(SVI, Candidates.back(), DL)) {
            Candidates.pop_back();
            continue;
          }

          // Keep only candidates interleaved with the current factor.
          if (!Candidates.back().isInterleaved(Factor, DL)) {
            Candidates.pop_back();
          }
        }
      }
    }

    std::list<VectorInfo> InterleavedLoad;
    while (findPattern(Candidates, InterleavedLoad, Factor, DL)) {
      if (combine(InterleavedLoad, ORE)) {
        changed = true;
      } else {
        // Remove the first element of the Interleaved Load but put the others
        // back on the list and continue searching
        Candidates.splice(Candidates.begin(), InterleavedLoad,
                          std::next(InterleavedLoad.begin()),
                          InterleavedLoad.end());
      }
      InterleavedLoad.clear();
    }
  }

  return changed;
}
namespace {
/// This pass combines interleaved loads into a pattern detectable by
/// InterleavedAccessPass.
struct InterleavedLoadCombine : public FunctionPass {
  static char ID;

  InterleavedLoadCombine() : FunctionPass(ID) {
    initializeInterleavedLoadCombinePass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "Interleaved Load Combine Pass";
  }

  /// Legacy pass-manager entry point; delegates the actual work to
  /// InterleavedLoadCombineImpl::run(). Returns true if the IR was changed.
  bool runOnFunction(Function &F) override {
    if (DisableInterleavedLoadCombine)
      return false;

    // Needs the target machine; bail out in pipelines without a
    // TargetPassConfig (e.g. plain opt runs).
    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC)
      return false;

    LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName()
                      << "\n");

    return InterleavedLoadCombineImpl(
               F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
               getAnalysis<MemorySSAWrapperPass>().getMSSA(),
               TPC->getTM<TargetMachine>())
        .run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

private:
};
} // anonymous namespace
char InterleavedLoadCombine::ID = 0;

// Register the pass with the legacy pass manager, declaring the analyses it
// depends on so the scheduler makes DominatorTree and MemorySSA available.
INITIALIZE_PASS_BEGIN(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
  1174. FunctionPass *
  1175. llvm::createInterleavedLoadCombinePass() {
  1176. auto P = new InterleavedLoadCombine();
  1177. return P;
  1178. }