TargetLoweringBase.cpp

  1. //===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This implements the TargetLoweringBase class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "llvm/ADT/BitVector.h"
  13. #include "llvm/ADT/STLExtras.h"
  14. #include "llvm/ADT/SmallVector.h"
  15. #include "llvm/ADT/StringExtras.h"
  16. #include "llvm/ADT/StringRef.h"
  17. #include "llvm/ADT/Triple.h"
  18. #include "llvm/ADT/Twine.h"
  19. #include "llvm/CodeGen/Analysis.h"
  20. #include "llvm/CodeGen/ISDOpcodes.h"
  21. #include "llvm/CodeGen/MachineBasicBlock.h"
  22. #include "llvm/CodeGen/MachineFrameInfo.h"
  23. #include "llvm/CodeGen/MachineFunction.h"
  24. #include "llvm/CodeGen/MachineInstr.h"
  25. #include "llvm/CodeGen/MachineInstrBuilder.h"
  26. #include "llvm/CodeGen/MachineMemOperand.h"
  27. #include "llvm/CodeGen/MachineOperand.h"
  28. #include "llvm/CodeGen/MachineRegisterInfo.h"
  29. #include "llvm/CodeGen/RuntimeLibcalls.h"
  30. #include "llvm/CodeGen/StackMaps.h"
  31. #include "llvm/CodeGen/TargetLowering.h"
  32. #include "llvm/CodeGen/TargetOpcodes.h"
  33. #include "llvm/CodeGen/TargetRegisterInfo.h"
  34. #include "llvm/CodeGen/ValueTypes.h"
  35. #include "llvm/IR/Attributes.h"
  36. #include "llvm/IR/CallingConv.h"
  37. #include "llvm/IR/DataLayout.h"
  38. #include "llvm/IR/DerivedTypes.h"
  39. #include "llvm/IR/Function.h"
  40. #include "llvm/IR/GlobalValue.h"
  41. #include "llvm/IR/GlobalVariable.h"
  42. #include "llvm/IR/IRBuilder.h"
  43. #include "llvm/IR/Module.h"
  44. #include "llvm/IR/Type.h"
  45. #include "llvm/Support/BranchProbability.h"
  46. #include "llvm/Support/Casting.h"
  47. #include "llvm/Support/CommandLine.h"
  48. #include "llvm/Support/Compiler.h"
  49. #include "llvm/Support/ErrorHandling.h"
  50. #include "llvm/Support/MachineValueType.h"
  51. #include "llvm/Support/MathExtras.h"
  52. #include "llvm/Target/TargetMachine.h"
  53. #include <algorithm>
  54. #include <cassert>
  55. #include <cstddef>
  56. #include <cstdint>
  57. #include <cstring>
  58. #include <iterator>
  59. #include <string>
  60. #include <tuple>
  61. #include <utility>
  62. using namespace llvm;
  63. static cl::opt<bool> JumpIsExpensiveOverride(
  64. "jump-is-expensive", cl::init(false),
  65. cl::desc("Do not create extra branches to split comparison logic."),
  66. cl::Hidden);
  67. static cl::opt<unsigned> MinimumJumpTableEntries
  68. ("min-jump-table-entries", cl::init(4), cl::Hidden,
  69. cl::desc("Set minimum number of entries to use a jump table."));
  70. static cl::opt<unsigned> MaximumJumpTableSize
  71. ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
  72. cl::desc("Set maximum size of jump tables."));
  73. /// Minimum jump table density for normal functions.
  74. static cl::opt<unsigned>
  75. JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
  76. cl::desc("Minimum density for building a jump table in "
  77. "a normal function"));
  78. /// Minimum jump table density for -Os or -Oz functions.
  79. static cl::opt<unsigned> OptsizeJumpTableDensity(
  80. "optsize-jump-table-density", cl::init(40), cl::Hidden,
  81. cl::desc("Minimum density for building a jump table in "
  82. "an optsize function"));
  83. static bool darwinHasSinCos(const Triple &TT) {
  84. assert(TT.isOSDarwin() && "should be called with darwin triple");
  85. // Don't bother with 32 bit x86.
  86. if (TT.getArch() == Triple::x86)
  87. return false;
  88. // Macos < 10.9 has no sincos_stret.
  89. if (TT.isMacOSX())
  90. return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  91. // iOS < 7.0 has no sincos_stret.
  92. if (TT.isiOS())
  93. return !TT.isOSVersionLT(7, 0);
  94. // Any other darwin such as WatchOS/TvOS is new enough.
  95. return true;
  96. }
  97. // Although this default value is arbitrary, it is not random. It is assumed
  98. // that a condition that evaluates the same way by a higher percentage than this
  99. // is best represented as control flow. Therefore, the default value N should be
  100. // set such that the win from N% correct executions is greater than the loss
  101. // from (100 - N)% mispredicted executions for the majority of intended targets.
  102. static cl::opt<int> MinPercentageForPredictableBranch(
  103. "min-predictable-branch", cl::init(99),
  104. cl::desc("Minimum percentage (0-100) that a condition must be either true "
  105. "or false to assume that the condition is predictable"),
  106. cl::Hidden);
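/// InitLibcalls - Set the default libcall names and calling conventions, then
/// apply the per-OS overrides handled below (Darwin, GNU/Fuchsia/Android,
/// OpenBSD).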
  107. void TargetLoweringBase::InitLibcalls(const Triple &TT) {
  108. #define HANDLE_LIBCALL(code, name) \
  109. setLibcallName(RTLIB::code, name);
  110. #include "llvm/IR/RuntimeLibcalls.def"
  111. #undef HANDLE_LIBCALL
  112. // Initialize calling conventions to their default.
  113. for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
  114. setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
  115. // A few names are different on particular architectures or environments.
  116. if (TT.isOSDarwin()) {
  117. // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
  118. // of the gnueabi-style __gnu_*_ieee.
  119. // FIXME: What about other targets?
  120. setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  121. setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
  122. // Some darwins have an optimized __bzero/bzero function.
  123. switch (TT.getArch()) {
  124. case Triple::x86:
  125. case Triple::x86_64:
  126. if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
  127. setLibcallName(RTLIB::BZERO, "__bzero");
  128. break;
  129. case Triple::aarch64:
  130. setLibcallName(RTLIB::BZERO, "bzero");
  131. break;
  132. default:
  133. break;
  134. }
  135. if (darwinHasSinCos(TT)) {
  136. setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
  137. setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
  138. if (TT.isWatchABI()) {
  139. setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
  140. CallingConv::ARM_AAPCS_VFP);
  141. setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
  142. CallingConv::ARM_AAPCS_VFP);
  143. }
  144. }
  145. } else {
  146. setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
  147. setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  148. }
  149. if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
  150. (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
  151. setLibcallName(RTLIB::SINCOS_F32, "sincosf");
  152. setLibcallName(RTLIB::SINCOS_F64, "sincos");
  153. setLibcallName(RTLIB::SINCOS_F80, "sincosl");
  154. setLibcallName(RTLIB::SINCOS_F128, "sincosl");
  155. setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  156. }
  157. if (TT.isOSOpenBSD()) {
  158. setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  159. }
  160. }
  161. /// getFPEXT - Return the FPEXT_*_* value for the given types, or
  162. /// UNKNOWN_LIBCALL if there is none.
  163. RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  164. if (OpVT == MVT::f16) {
  165. if (RetVT == MVT::f32)
  166. return FPEXT_F16_F32;
  167. } else if (OpVT == MVT::f32) {
  168. if (RetVT == MVT::f64)
  169. return FPEXT_F32_F64;
  170. if (RetVT == MVT::f128)
  171. return FPEXT_F32_F128;
  172. if (RetVT == MVT::ppcf128)
  173. return FPEXT_F32_PPCF128;
  174. } else if (OpVT == MVT::f64) {
  175. if (RetVT == MVT::f128)
  176. return FPEXT_F64_F128;
  177. else if (RetVT == MVT::ppcf128)
  178. return FPEXT_F64_PPCF128;
  179. } else if (OpVT == MVT::f80) {
  180. if (RetVT == MVT::f128)
  181. return FPEXT_F80_F128;
  182. }
  183. return UNKNOWN_LIBCALL;
  184. }
  185. /// getFPROUND - Return the FPROUND_*_* value for the given types, or
  186. /// UNKNOWN_LIBCALL if there is none.
  187. RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  188. if (RetVT == MVT::f16) {
  189. if (OpVT == MVT::f32)
  190. return FPROUND_F32_F16;
  191. if (OpVT == MVT::f64)
  192. return FPROUND_F64_F16;
  193. if (OpVT == MVT::f80)
  194. return FPROUND_F80_F16;
  195. if (OpVT == MVT::f128)
  196. return FPROUND_F128_F16;
  197. if (OpVT == MVT::ppcf128)
  198. return FPROUND_PPCF128_F16;
  199. } else if (RetVT == MVT::f32) {
  200. if (OpVT == MVT::f64)
  201. return FPROUND_F64_F32;
  202. if (OpVT == MVT::f80)
  203. return FPROUND_F80_F32;
  204. if (OpVT == MVT::f128)
  205. return FPROUND_F128_F32;
  206. if (OpVT == MVT::ppcf128)
  207. return FPROUND_PPCF128_F32;
  208. } else if (RetVT == MVT::f64) {
  209. if (OpVT == MVT::f80)
  210. return FPROUND_F80_F64;
  211. if (OpVT == MVT::f128)
  212. return FPROUND_F128_F64;
  213. if (OpVT == MVT::ppcf128)
  214. return FPROUND_PPCF128_F64;
  215. } else if (RetVT == MVT::f80) {
  216. if (OpVT == MVT::f128)
  217. return FPROUND_F128_F80;
  218. }
  219. return UNKNOWN_LIBCALL;
  220. }
  221. /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
  222. /// UNKNOWN_LIBCALL if there is none.
  223. RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  224. if (OpVT == MVT::f32) {
  225. if (RetVT == MVT::i32)
  226. return FPTOSINT_F32_I32;
  227. if (RetVT == MVT::i64)
  228. return FPTOSINT_F32_I64;
  229. if (RetVT == MVT::i128)
  230. return FPTOSINT_F32_I128;
  231. } else if (OpVT == MVT::f64) {
  232. if (RetVT == MVT::i32)
  233. return FPTOSINT_F64_I32;
  234. if (RetVT == MVT::i64)
  235. return FPTOSINT_F64_I64;
  236. if (RetVT == MVT::i128)
  237. return FPTOSINT_F64_I128;
  238. } else if (OpVT == MVT::f80) {
  239. if (RetVT == MVT::i32)
  240. return FPTOSINT_F80_I32;
  241. if (RetVT == MVT::i64)
  242. return FPTOSINT_F80_I64;
  243. if (RetVT == MVT::i128)
  244. return FPTOSINT_F80_I128;
  245. } else if (OpVT == MVT::f128) {
  246. if (RetVT == MVT::i32)
  247. return FPTOSINT_F128_I32;
  248. if (RetVT == MVT::i64)
  249. return FPTOSINT_F128_I64;
  250. if (RetVT == MVT::i128)
  251. return FPTOSINT_F128_I128;
  252. } else if (OpVT == MVT::ppcf128) {
  253. if (RetVT == MVT::i32)
  254. return FPTOSINT_PPCF128_I32;
  255. if (RetVT == MVT::i64)
  256. return FPTOSINT_PPCF128_I64;
  257. if (RetVT == MVT::i128)
  258. return FPTOSINT_PPCF128_I128;
  259. }
  260. return UNKNOWN_LIBCALL;
  261. }
  262. /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
  263. /// UNKNOWN_LIBCALL if there is none.
  264. RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  265. if (OpVT == MVT::f32) {
  266. if (RetVT == MVT::i32)
  267. return FPTOUINT_F32_I32;
  268. if (RetVT == MVT::i64)
  269. return FPTOUINT_F32_I64;
  270. if (RetVT == MVT::i128)
  271. return FPTOUINT_F32_I128;
  272. } else if (OpVT == MVT::f64) {
  273. if (RetVT == MVT::i32)
  274. return FPTOUINT_F64_I32;
  275. if (RetVT == MVT::i64)
  276. return FPTOUINT_F64_I64;
  277. if (RetVT == MVT::i128)
  278. return FPTOUINT_F64_I128;
  279. } else if (OpVT == MVT::f80) {
  280. if (RetVT == MVT::i32)
  281. return FPTOUINT_F80_I32;
  282. if (RetVT == MVT::i64)
  283. return FPTOUINT_F80_I64;
  284. if (RetVT == MVT::i128)
  285. return FPTOUINT_F80_I128;
  286. } else if (OpVT == MVT::f128) {
  287. if (RetVT == MVT::i32)
  288. return FPTOUINT_F128_I32;
  289. if (RetVT == MVT::i64)
  290. return FPTOUINT_F128_I64;
  291. if (RetVT == MVT::i128)
  292. return FPTOUINT_F128_I128;
  293. } else if (OpVT == MVT::ppcf128) {
  294. if (RetVT == MVT::i32)
  295. return FPTOUINT_PPCF128_I32;
  296. if (RetVT == MVT::i64)
  297. return FPTOUINT_PPCF128_I64;
  298. if (RetVT == MVT::i128)
  299. return FPTOUINT_PPCF128_I128;
  300. }
  301. return UNKNOWN_LIBCALL;
  302. }
  303. /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
  304. /// UNKNOWN_LIBCALL if there is none.
  305. RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  306. if (OpVT == MVT::i32) {
  307. if (RetVT == MVT::f32)
  308. return SINTTOFP_I32_F32;
  309. if (RetVT == MVT::f64)
  310. return SINTTOFP_I32_F64;
  311. if (RetVT == MVT::f80)
  312. return SINTTOFP_I32_F80;
  313. if (RetVT == MVT::f128)
  314. return SINTTOFP_I32_F128;
  315. if (RetVT == MVT::ppcf128)
  316. return SINTTOFP_I32_PPCF128;
  317. } else if (OpVT == MVT::i64) {
  318. if (RetVT == MVT::f32)
  319. return SINTTOFP_I64_F32;
  320. if (RetVT == MVT::f64)
  321. return SINTTOFP_I64_F64;
  322. if (RetVT == MVT::f80)
  323. return SINTTOFP_I64_F80;
  324. if (RetVT == MVT::f128)
  325. return SINTTOFP_I64_F128;
  326. if (RetVT == MVT::ppcf128)
  327. return SINTTOFP_I64_PPCF128;
  328. } else if (OpVT == MVT::i128) {
  329. if (RetVT == MVT::f32)
  330. return SINTTOFP_I128_F32;
  331. if (RetVT == MVT::f64)
  332. return SINTTOFP_I128_F64;
  333. if (RetVT == MVT::f80)
  334. return SINTTOFP_I128_F80;
  335. if (RetVT == MVT::f128)
  336. return SINTTOFP_I128_F128;
  337. if (RetVT == MVT::ppcf128)
  338. return SINTTOFP_I128_PPCF128;
  339. }
  340. return UNKNOWN_LIBCALL;
  341. }
  342. /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
  343. /// UNKNOWN_LIBCALL if there is none.
  344. RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  345. if (OpVT == MVT::i32) {
  346. if (RetVT == MVT::f32)
  347. return UINTTOFP_I32_F32;
  348. if (RetVT == MVT::f64)
  349. return UINTTOFP_I32_F64;
  350. if (RetVT == MVT::f80)
  351. return UINTTOFP_I32_F80;
  352. if (RetVT == MVT::f128)
  353. return UINTTOFP_I32_F128;
  354. if (RetVT == MVT::ppcf128)
  355. return UINTTOFP_I32_PPCF128;
  356. } else if (OpVT == MVT::i64) {
  357. if (RetVT == MVT::f32)
  358. return UINTTOFP_I64_F32;
  359. if (RetVT == MVT::f64)
  360. return UINTTOFP_I64_F64;
  361. if (RetVT == MVT::f80)
  362. return UINTTOFP_I64_F80;
  363. if (RetVT == MVT::f128)
  364. return UINTTOFP_I64_F128;
  365. if (RetVT == MVT::ppcf128)
  366. return UINTTOFP_I64_PPCF128;
  367. } else if (OpVT == MVT::i128) {
  368. if (RetVT == MVT::f32)
  369. return UINTTOFP_I128_F32;
  370. if (RetVT == MVT::f64)
  371. return UINTTOFP_I128_F64;
  372. if (RetVT == MVT::f80)
  373. return UINTTOFP_I128_F80;
  374. if (RetVT == MVT::f128)
  375. return UINTTOFP_I128_F128;
  376. if (RetVT == MVT::ppcf128)
  377. return UINTTOFP_I128_PPCF128;
  378. }
  379. return UNKNOWN_LIBCALL;
  380. }
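/// getSYNC - Return the SYNC_* libcall for the given atomic opcode and
/// operand type, or UNKNOWN_LIBCALL if there is none.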
  381. RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
  382. #define OP_TO_LIBCALL(Name, Enum) \
  383. case Name: \
  384. switch (VT.SimpleTy) { \
  385. default: \
  386. return UNKNOWN_LIBCALL; \
  387. case MVT::i8: \
  388. return Enum##_1; \
  389. case MVT::i16: \
  390. return Enum##_2; \
  391. case MVT::i32: \
  392. return Enum##_4; \
  393. case MVT::i64: \
  394. return Enum##_8; \
  395. case MVT::i128: \
  396. return Enum##_16; \
  397. }
  398. switch (Opc) {
  399. OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  400. OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  401. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  402. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  403. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  404. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  405. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  406. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  407. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  408. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  409. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  410. OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  411. }
  412. #undef OP_TO_LIBCALL
  413. return UNKNOWN_LIBCALL;
  414. }
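/// Return the MEMCPY_ELEMENT_UNORDERED_ATOMIC_* libcall for the given element
/// size, or UNKNOWN_LIBCALL if the element size is unsupported.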
  415. RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  416. switch (ElementSize) {
  417. case 1:
  418. return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  419. case 2:
  420. return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  421. case 4:
  422. return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  423. case 8:
  424. return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  425. case 16:
  426. return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  427. default:
  428. return UNKNOWN_LIBCALL;
  429. }
  430. }
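/// Return the MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* libcall for the given element
/// size, or UNKNOWN_LIBCALL if the element size is unsupported.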
  431. RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  432. switch (ElementSize) {
  433. case 1:
  434. return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  435. case 2:
  436. return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  437. case 4:
  438. return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  439. case 8:
  440. return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  441. case 16:
  442. return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  443. default:
  444. return UNKNOWN_LIBCALL;
  445. }
  446. }
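/// Return the MEMSET_ELEMENT_UNORDERED_ATOMIC_* libcall for the given element
/// size, or UNKNOWN_LIBCALL if the element size is unsupported.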
  447. RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  448. switch (ElementSize) {
  449. case 1:
  450. return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  451. case 2:
  452. return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  453. case 4:
  454. return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  455. case 8:
  456. return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  457. case 16:
  458. return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  459. default:
  460. return UNKNOWN_LIBCALL;
  461. }
  462. }
  463. /// InitCmpLibcallCCs - Set default comparison libcall CC.
  464. static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  465. memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  466. CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  467. CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  468. CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  469. CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  470. CCs[RTLIB::UNE_F32] = ISD::SETNE;
  471. CCs[RTLIB::UNE_F64] = ISD::SETNE;
  472. CCs[RTLIB::UNE_F128] = ISD::SETNE;
  473. CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  474. CCs[RTLIB::OGE_F32] = ISD::SETGE;
  475. CCs[RTLIB::OGE_F64] = ISD::SETGE;
  476. CCs[RTLIB::OGE_F128] = ISD::SETGE;
  477. CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  478. CCs[RTLIB::OLT_F32] = ISD::SETLT;
  479. CCs[RTLIB::OLT_F64] = ISD::SETLT;
  480. CCs[RTLIB::OLT_F128] = ISD::SETLT;
  481. CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  482. CCs[RTLIB::OLE_F32] = ISD::SETLE;
  483. CCs[RTLIB::OLE_F64] = ISD::SETLE;
  484. CCs[RTLIB::OLE_F128] = ISD::SETLE;
  485. CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  486. CCs[RTLIB::OGT_F32] = ISD::SETGT;
  487. CCs[RTLIB::OGT_F64] = ISD::SETGT;
  488. CCs[RTLIB::OGT_F128] = ISD::SETGT;
  489. CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  490. CCs[RTLIB::UO_F32] = ISD::SETNE;
  491. CCs[RTLIB::UO_F64] = ISD::SETNE;
  492. CCs[RTLIB::UO_F128] = ISD::SETNE;
  493. CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
  494. CCs[RTLIB::O_F32] = ISD::SETEQ;
  495. CCs[RTLIB::O_F64] = ISD::SETEQ;
  496. CCs[RTLIB::O_F128] = ISD::SETEQ;
  497. CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
  498. }
  499. /// NOTE: The TargetMachine owns TLOF.
  500. TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  501. initActions();
  502. // Perform these initializations only once.
  503. MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
  504. MaxLoadsPerMemcmp = 8;
  505. MaxGluedStoresPerMemcpy = 0;
  506. MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
  507. MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  508. UseUnderscoreSetJmp = false;
  509. UseUnderscoreLongJmp = false;
  510. HasMultipleConditionRegisters = false;
  511. HasExtractBitsInsn = false;
  512. JumpIsExpensive = JumpIsExpensiveOverride;
  513. PredictableSelectIsExpensive = false;
  514. EnableExtLdPromotion = false;
  515. StackPointerRegisterToSaveRestore = 0;
  516. BooleanContents = UndefinedBooleanContent;
  517. BooleanFloatContents = UndefinedBooleanContent;
  518. BooleanVectorContents = UndefinedBooleanContent;
  519. SchedPreferenceInfo = Sched::ILP;
  520. JumpBufSize = 0;
  521. JumpBufAlignment = 0;
  522. MinFunctionAlignment = 0;
  523. PrefFunctionAlignment = 0;
  524. PrefLoopAlignment = 0;
  525. GatherAllAliasesMaxDepth = 18;
  526. MinStackArgumentAlignment = 1;
  527. // TODO: the default will be switched to 0 in the next commit, along
  528. // with the Target-specific changes necessary.
  529. MaxAtomicSizeInBitsSupported = 1024;
  530. MinCmpXchgSizeInBits = 0;
  531. SupportsUnalignedAtomics = false;
  532. std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
  533. InitLibcalls(TM.getTargetTriple());
  534. InitCmpLibcallCCs(CmpLibcallCCs);
  535. }
  536. void TargetLoweringBase::initActions() {
  537. // All operations default to being supported.
  538. memset(OpActions, 0, sizeof(OpActions));
  539. memset(LoadExtActions, 0, sizeof(LoadExtActions));
  540. memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  541. memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  542. memset(CondCodeActions, 0, sizeof(CondCodeActions));
  543. std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  544. std::fill(std::begin(TargetDAGCombineArray),
  545. std::end(TargetDAGCombineArray), 0);
  546. for (MVT VT : MVT::fp_valuetypes()) {
  547. MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
  548. if (IntVT.isValid()) {
  549. setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
  550. AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
  551. }
  552. }
  553. // Set default actions for various operations.
  554. for (MVT VT : MVT::all_valuetypes()) {
  555. // Default all indexed load / store to expand.
  556. for (unsigned IM = (unsigned)ISD::PRE_INC;
  557. IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
  558. setIndexedLoadAction(IM, VT, Expand);
  559. setIndexedStoreAction(IM, VT, Expand);
  560. }
  561. // Most backends expect to see the node which just returns the value loaded.
  562. setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
  563. // These operations default to expand.
  564. setOperationAction(ISD::FGETSIGN, VT, Expand);
  565. setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
  566. setOperationAction(ISD::FMINNUM, VT, Expand);
  567. setOperationAction(ISD::FMAXNUM, VT, Expand);
  568. setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
  569. setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
  570. setOperationAction(ISD::FMINIMUM, VT, Expand);
  571. setOperationAction(ISD::FMAXIMUM, VT, Expand);
  572. setOperationAction(ISD::FMAD, VT, Expand);
  573. setOperationAction(ISD::SMIN, VT, Expand);
  574. setOperationAction(ISD::SMAX, VT, Expand);
  575. setOperationAction(ISD::UMIN, VT, Expand);
  576. setOperationAction(ISD::UMAX, VT, Expand);
  577. setOperationAction(ISD::ABS, VT, Expand);
  578. setOperationAction(ISD::FSHL, VT, Expand);
  579. setOperationAction(ISD::FSHR, VT, Expand);
  580. setOperationAction(ISD::SADDSAT, VT, Expand);
  581. setOperationAction(ISD::UADDSAT, VT, Expand);
  582. setOperationAction(ISD::SSUBSAT, VT, Expand);
  583. setOperationAction(ISD::USUBSAT, VT, Expand);
  584. setOperationAction(ISD::SMULFIX, VT, Expand);
  585. setOperationAction(ISD::SMULFIXSAT, VT, Expand);
  586. setOperationAction(ISD::UMULFIX, VT, Expand);
  587. // Overflow operations default to expand
  588. setOperationAction(ISD::SADDO, VT, Expand);
  589. setOperationAction(ISD::SSUBO, VT, Expand);
  590. setOperationAction(ISD::UADDO, VT, Expand);
  591. setOperationAction(ISD::USUBO, VT, Expand);
  592. setOperationAction(ISD::SMULO, VT, Expand);
  593. setOperationAction(ISD::UMULO, VT, Expand);
  594. // ADDCARRY operations default to expand
  595. setOperationAction(ISD::ADDCARRY, VT, Expand);
  596. setOperationAction(ISD::SUBCARRY, VT, Expand);
  597. setOperationAction(ISD::SETCCCARRY, VT, Expand);
  598. // ADDC/ADDE/SUBC/SUBE default to expand.
  599. setOperationAction(ISD::ADDC, VT, Expand);
  600. setOperationAction(ISD::ADDE, VT, Expand);
  601. setOperationAction(ISD::SUBC, VT, Expand);
  602. setOperationAction(ISD::SUBE, VT, Expand);
  603. // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
  604. setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
  605. setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
  606. setOperationAction(ISD::BITREVERSE, VT, Expand);
  607. // These library functions default to expand.
  608. setOperationAction(ISD::FROUND, VT, Expand);
  609. setOperationAction(ISD::FPOWI, VT, Expand);
  610. // These operations default to expand for vector types.
  611. if (VT.isVector()) {
  612. setOperationAction(ISD::FCOPYSIGN, VT, Expand);
  613. setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
  614. setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
  615. setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
  616. }
  617. // Constrained floating-point operations default to expand.
  618. setOperationAction(ISD::STRICT_FADD, VT, Expand);
  619. setOperationAction(ISD::STRICT_FSUB, VT, Expand);
  620. setOperationAction(ISD::STRICT_FMUL, VT, Expand);
  621. setOperationAction(ISD::STRICT_FDIV, VT, Expand);
  622. setOperationAction(ISD::STRICT_FREM, VT, Expand);
  623. setOperationAction(ISD::STRICT_FMA, VT, Expand);
  624. setOperationAction(ISD::STRICT_FSQRT, VT, Expand);
  625. setOperationAction(ISD::STRICT_FPOW, VT, Expand);
  626. setOperationAction(ISD::STRICT_FPOWI, VT, Expand);
  627. setOperationAction(ISD::STRICT_FSIN, VT, Expand);
  628. setOperationAction(ISD::STRICT_FCOS, VT, Expand);
  629. setOperationAction(ISD::STRICT_FEXP, VT, Expand);
  630. setOperationAction(ISD::STRICT_FEXP2, VT, Expand);
  631. setOperationAction(ISD::STRICT_FLOG, VT, Expand);
  632. setOperationAction(ISD::STRICT_FLOG10, VT, Expand);
  633. setOperationAction(ISD::STRICT_FLOG2, VT, Expand);
  634. setOperationAction(ISD::STRICT_FRINT, VT, Expand);
  635. setOperationAction(ISD::STRICT_FNEARBYINT, VT, Expand);
  636. setOperationAction(ISD::STRICT_FCEIL, VT, Expand);
  637. setOperationAction(ISD::STRICT_FFLOOR, VT, Expand);
  638. setOperationAction(ISD::STRICT_FROUND, VT, Expand);
  639. setOperationAction(ISD::STRICT_FTRUNC, VT, Expand);
  640. setOperationAction(ISD::STRICT_FMAXNUM, VT, Expand);
  641. setOperationAction(ISD::STRICT_FMINNUM, VT, Expand);
  642. setOperationAction(ISD::STRICT_FP_ROUND, VT, Expand);
  643. setOperationAction(ISD::STRICT_FP_EXTEND, VT, Expand);
  644. // For most targets @llvm.get.dynamic.area.offset just returns 0.
  645. setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
  646. // Vector reduction default to expand.
  647. setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
  648. setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
  649. setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
  650. setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
  651. setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
  652. setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
  653. setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
  654. setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
  655. setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
  656. setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
  657. setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
  658. setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
  659. setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
  660. }
  661. // Most targets ignore the @llvm.prefetch intrinsic.
  662. setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
  663. // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  664. setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
  665. // ConstantFP nodes default to expand. Targets can either change this to
  666. // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  667. // to optimize expansions for certain constants.
  668. setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  669. setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  670. setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  671. setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  672. setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
  673. // These library functions default to expand.
  674. for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
  675. setOperationAction(ISD::FCBRT, VT, Expand);
  676. setOperationAction(ISD::FLOG , VT, Expand);
  677. setOperationAction(ISD::FLOG2, VT, Expand);
  678. setOperationAction(ISD::FLOG10, VT, Expand);
  679. setOperationAction(ISD::FEXP , VT, Expand);
  680. setOperationAction(ISD::FEXP2, VT, Expand);
  681. setOperationAction(ISD::FFLOOR, VT, Expand);
  682. setOperationAction(ISD::FNEARBYINT, VT, Expand);
  683. setOperationAction(ISD::FCEIL, VT, Expand);
  684. setOperationAction(ISD::FRINT, VT, Expand);
  685. setOperationAction(ISD::FTRUNC, VT, Expand);
  686. setOperationAction(ISD::FROUND, VT, Expand);
  687. setOperationAction(ISD::LROUND, VT, Expand);
  688. setOperationAction(ISD::LLROUND, VT, Expand);
  689. setOperationAction(ISD::LRINT, VT, Expand);
  690. setOperationAction(ISD::LLRINT, VT, Expand);
  691. }
  692. // Default ISD::TRAP to expand (which turns it into abort).
  693. setOperationAction(ISD::TRAP, MVT::Other, Expand);
  694. // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  695. // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  696. setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
  697. }
  698. MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
  699. EVT) const {
  700. return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
  701. }
  702. EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
  703. bool LegalTypes) const {
  704. assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  705. if (LHSTy.isVector())
  706. return LHSTy;
  707. return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
  708. : getPointerTy(DL);
  709. }
  710. bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  711. assert(isTypeLegal(VT));
  712. switch (Op) {
  713. default:
  714. return false;
  715. case ISD::SDIV:
  716. case ISD::UDIV:
  717. case ISD::SREM:
  718. case ISD::UREM:
  719. return true;
  720. }
  721. }
  722. void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  723. // If the command-line option was specified, ignore this request.
  724. if (!JumpIsExpensiveOverride.getNumOccurrences())
  725. JumpIsExpensive = isExpensive;
  726. }
  727. TargetLoweringBase::LegalizeKind
  728. TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  729. // If this is a simple type, use the ComputeRegisterProp mechanism.
  730. if (VT.isSimple()) {
  731. MVT SVT = VT.getSimpleVT();
  732. assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
  733. MVT NVT = TransformToType[SVT.SimpleTy];
  734. LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
  735. assert((LA == TypeLegal || LA == TypeSoftenFloat ||
  736. ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
  737. "Promote may not follow Expand or Promote");
  738. if (LA == TypeSplitVector)
  739. return LegalizeKind(LA,
  740. EVT::getVectorVT(Context, SVT.getVectorElementType(),
  741. SVT.getVectorNumElements() / 2));
  742. if (LA == TypeScalarizeVector)
  743. return LegalizeKind(LA, SVT.getVectorElementType());
  744. return LegalizeKind(LA, NVT);
  745. }
  746. // Handle Extended Scalar Types.
  747. if (!VT.isVector()) {
  748. assert(VT.isInteger() && "Float types must be simple");
  749. unsigned BitSize = VT.getSizeInBits();
  750. // First promote to a power-of-two size, then expand if necessary.
  751. if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
  752. EVT NVT = VT.getRoundIntegerType(Context);
  753. assert(NVT != VT && "Unable to round integer VT");
  754. LegalizeKind NextStep = getTypeConversion(Context, NVT);
  755. // Avoid multi-step promotion.
  756. if (NextStep.first == TypePromoteInteger)
  757. return NextStep;
  758. // Return rounded integer type.
  759. return LegalizeKind(TypePromoteInteger, NVT);
  760. }
  761. return LegalizeKind(TypeExpandInteger,
  762. EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  763. }
  764. // Handle vector types.
  765. unsigned NumElts = VT.getVectorNumElements();
  766. EVT EltVT = VT.getVectorElementType();
  767. // Vectors with only one element are always scalarized.
  768. if (NumElts == 1)
  769. return LegalizeKind(TypeScalarizeVector, EltVT);
  770. // Try to widen vector elements until the element type is a power of two and
  771. // promote it to a legal type later on, for example:
  772. // <3 x i8> -> <4 x i8> -> <4 x i32>
  773. if (EltVT.isInteger()) {
  774. // Vectors with a number of elements that is not a power of two are always
  775. // widened, for example <3 x i8> -> <4 x i8>.
  776. if (!VT.isPow2VectorType()) {
  777. NumElts = (unsigned)NextPowerOf2(NumElts);
  778. EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
  779. return LegalizeKind(TypeWidenVector, NVT);
  780. }
  781. // Examine the element type.
  782. LegalizeKind LK = getTypeConversion(Context, EltVT);
  783. // If type is to be expanded, split the vector.
  784. // <4 x i140> -> <2 x i140>
  785. if (LK.first == TypeExpandInteger)
  786. return LegalizeKind(TypeSplitVector,
  787. EVT::getVectorVT(Context, EltVT, NumElts / 2));
  788. // Promote the integer element types until a legal vector type is found
  789. // or until the element integer type is too big. If a legal type was not
  790. // found, fallback to the usual mechanism of widening/splitting the
  791. // vector.
  792. EVT OldEltVT = EltVT;
  793. while (true) {
  794. // Increase the bitwidth of the element to the next pow-of-two
  795. // (which is greater than 8 bits).
  796. EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
  797. .getRoundIntegerType(Context);
  798. // Stop trying when getting a non-simple element type.
  799. // Note that vector elements may be greater than legal vector element
  800. // types. Example: X86 XMM registers hold 64bit element on 32bit
  801. // systems.
  802. if (!EltVT.isSimple())
  803. break;
  804. // Build a new vector type and check if it is legal.
  805. MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
  806. // Found a legal promoted vector type.
  807. if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
  808. return LegalizeKind(TypePromoteInteger,
  809. EVT::getVectorVT(Context, EltVT, NumElts));
  810. }
  811. // Reset the type to the unexpanded type if we did not find a legal vector
  812. // type with a promoted vector element type.
  813. EltVT = OldEltVT;
  814. }
  815. // Try to widen the vector until a legal type is found.
  816. // If there is no wider legal type, split the vector.
  817. while (true) {
  818. // Round up to the next power of 2.
  819. NumElts = (unsigned)NextPowerOf2(NumElts);
  820. // If there is no simple vector type with this many elements then there
  821. // cannot be a larger legal vector type. Note that this assumes that
  822. // there are no skipped intermediate vector types in the simple types.
  823. if (!EltVT.isSimple())
  824. break;
  825. MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
  826. if (LargerVector == MVT())
  827. break;
  828. // If this type is legal then widen the vector.
  829. if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
  830. return LegalizeKind(TypeWidenVector, LargerVector);
  831. }
  832. // Widen odd vectors to next power of two.
  833. if (!VT.isPow2VectorType()) {
  834. EVT NVT = VT.getPow2VectorType(Context);
  835. return LegalizeKind(TypeWidenVector, NVT);
  836. }
  837. // Vectors with illegal element types are expanded.
  838. EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  839. return LegalizeKind(TypeSplitVector, NVT);
  840. }
  841. static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
  842. unsigned &NumIntermediates,
  843. MVT &RegisterVT,
  844. TargetLoweringBase *TLI) {
  845. // Figure out the right, legal destination reg to copy into.
  846. unsigned NumElts = VT.getVectorNumElements();
  847. MVT EltTy = VT.getVectorElementType();
  848. unsigned NumVectorRegs = 1;
  849. // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  850. // could break down into LHS/RHS like LegalizeDAG does.
  851. if (!isPowerOf2_32(NumElts)) {
  852. NumVectorRegs = NumElts;
  853. NumElts = 1;
  854. }
  855. // Divide the input until we get to a supported size. This will always
  856. // end with a scalar if the target doesn't support vectors.
  857. while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
  858. NumElts >>= 1;
  859. NumVectorRegs <<= 1;
  860. }
  861. NumIntermediates = NumVectorRegs;
  862. MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  863. if (!TLI->isTypeLegal(NewVT))
  864. NewVT = EltTy;
  865. IntermediateVT = NewVT;
  866. unsigned NewVTSize = NewVT.getSizeInBits();
  867. // Convert sizes such as i33 to i64.
  868. if (!isPowerOf2_32(NewVTSize))
  869. NewVTSize = NextPowerOf2(NewVTSize);
  870. MVT DestVT = TLI->getRegisterType(NewVT);
  871. RegisterVT = DestVT;
  872. if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
  873. return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
  874. // Otherwise, promotion or legal types use the same number of registers as
  875. // the vector decimated to the appropriate level.
  876. return NumVectorRegs;
  877. }
  878. /// isLegalRC - Return true if the value types that can be represented by the
  879. /// specified register class are all legal.
  880. bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
  881. const TargetRegisterClass &RC) const {
  882. for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
  883. if (isTypeLegal(*I))
  884. return true;
  885. return false;
  886. }
  887. /// Replace/modify any TargetFrameIndex operands with a target-dependent
  888. /// sequence of memory operands that is recognized by PrologEpilogInserter.
  889. MachineBasicBlock *
  890. TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
  891. MachineBasicBlock *MBB) const {
  892. MachineInstr *MI = &InitialMI;
  893. MachineFunction &MF = *MI->getMF();
  894. MachineFrameInfo &MFI = MF.getFrameInfo();
  895. // We're handling multiple types of operands here:
  896. // PATCHPOINT MetaArgs - live-in, read only, direct
  897. // STATEPOINT Deopt Spill - live-through, read only, indirect
  898. // STATEPOINT Deopt Alloca - live-through, read only, direct
  899. // (We're currently conservative and mark the deopt slots read/write in
  900. // practice.)
  901. // STATEPOINT GC Spill - live-through, read/write, indirect
  902. // STATEPOINT GC Alloca - live-through, read/write, direct
  903. // The live-in vs live-through is handled already (the live through ones are
  904. // all stack slots), but we need to handle the different type of stackmap
  905. // operands and memory effects here.
  906. // MI changes inside this loop as we grow operands.
  907. for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
  908. MachineOperand &MO = MI->getOperand(OperIdx);
  909. if (!MO.isFI())
  910. continue;
  911. // foldMemoryOperand builds a new MI after replacing a single FI operand
  912. // with the canonical set of five x86 addressing-mode operands.
  913. int FI = MO.getIndex();
  914. MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
  915. // Copy operands before the frame-index.
  916. for (unsigned i = 0; i < OperIdx; ++i)
  917. MIB.add(MI->getOperand(i));
  918. // Add frame index operands recognized by stackmaps.cpp
  919. if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
  920. // indirect-mem-ref tag, size, #FI, offset.
  921. // Used for spills inserted by StatepointLowering. This codepath is not
  922. // used for patchpoints/stackmaps at all, for these spilling is done via
  923. // foldMemoryOperand callback only.
  924. assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
  925. MIB.addImm(StackMaps::IndirectMemRefOp);
  926. MIB.addImm(MFI.getObjectSize(FI));
  927. MIB.add(MI->getOperand(OperIdx));
  928. MIB.addImm(0);
  929. } else {
  930. // direct-mem-ref tag, #FI, offset.
  931. // Used by patchpoint, and direct alloca arguments to statepoints
  932. MIB.addImm(StackMaps::DirectMemRefOp);
  933. MIB.add(MI->getOperand(OperIdx));
  934. MIB.addImm(0);
  935. }
  936. // Copy the operands after the frame index.
  937. for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
  938. MIB.add(MI->getOperand(i));
  939. // Inherit previous memory operands.
  940. MIB.cloneMemRefs(*MI);
  941. assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
  942. // Add a new memory operand for this FI.
  943. assert(MFI.getObjectOffset(FI) != -1);
  944. // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
  945. // PATCHPOINT should be updated to do the same. (TODO)
  946. if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
  947. auto Flags = MachineMemOperand::MOLoad;
  948. MachineMemOperand *MMO = MF.getMachineMemOperand(
  949. MachinePointerInfo::getFixedStack(MF, FI), Flags,
  950. MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
  951. MIB->addMemOperand(MF, MMO);
  952. }
  953. // Replace the instruction and update the operand index.
  954. MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  955. OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
  956. MI->eraseFromParent();
  957. MI = MIB;
  958. }
  959. return MBB;
  960. }
  961. MachineBasicBlock *
  962. TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
  963. MachineBasicBlock *MBB) const {
  964. assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
  965. "Called emitXRayCustomEvent on the wrong MI!");
  966. auto &MF = *MI.getMF();
  967. auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  968. for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
  969. MIB.add(MI.getOperand(OpIdx));
  970. MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  971. MI.eraseFromParent();
  972. return MBB;
  973. }
  974. MachineBasicBlock *
  975. TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
  976. MachineBasicBlock *MBB) const {
  977. assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
  978. "Called emitXRayTypedEvent on the wrong MI!");
  979. auto &MF = *MI.getMF();
  980. auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  981. for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
  982. MIB.add(MI.getOperand(OpIdx));
  983. MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  984. MI.eraseFromParent();
  985. return MBB;
  986. }
  987. /// findRepresentativeClass - Return the largest legal super-reg register class
  988. /// of the register class for the specified type and its associated "cost".
  989. // This function is in TargetLowering because it uses RegClassForVT which would
  990. // need to be moved to TargetRegisterInfo and would necessitate moving
  991. // isTypeLegal over as well - a massive change that would just require
  992. // TargetLowering having a TargetRegisterInfo class member that it would use.
  993. std::pair<const TargetRegisterClass *, uint8_t>
  994. TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
  995. MVT VT) const {
  996. const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  997. if (!RC)
  998. return std::make_pair(RC, 0);
  999. // Compute the set of all super-register classes.
  1000. BitVector SuperRegRC(TRI->getNumRegClasses());
  1001. for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
  1002. SuperRegRC.setBitsInMask(RCI.getMask());
  1003. // Find the first legal register class with the largest spill size.
  1004. const TargetRegisterClass *BestRC = RC;
  1005. for (unsigned i : SuperRegRC.set_bits()) {
  1006. const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
  1007. // We want the largest possible spill size.
  1008. if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
  1009. continue;
  1010. if (!isLegalRC(*TRI, *SuperRC))
  1011. continue;
  1012. BestRC = SuperRC;
  1013. }
  1014. return std::make_pair(BestRC, 1);
  1015. }
  1016. /// computeRegisterProperties - Once all of the register classes are added,
  1017. /// this allows us to compute derived properties we expose.
  1018. void TargetLoweringBase::computeRegisterProperties(
  1019. const TargetRegisterInfo *TRI) {
  1020. static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
  1021. "Too many value types for ValueTypeActions to hold!");
  1022. // Everything defaults to needing one register.
  1023. for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
  1024. NumRegistersForVT[i] = 1;
  1025. RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  1026. }
  1027. // ...except isVoid, which doesn't need any registers.
  1028. NumRegistersForVT[MVT::isVoid] = 0;
  1029. // Find the largest integer register class.
  1030. unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  1031. for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
  1032. assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
  1033. // Every integer value type larger than this largest register takes twice as
  1034. // many registers to represent as the previous ValueType.
  1035. for (unsigned ExpandedReg = LargestIntReg + 1;
  1036. ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
  1037. NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
  1038. RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
  1039. TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
  1040. ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
  1041. TypeExpandInteger);
  1042. }
  1043. // Inspect all of the ValueType's smaller than the largest integer
  1044. // register to see which ones need promotion.
  1045. unsigned LegalIntReg = LargestIntReg;
  1046. for (unsigned IntReg = LargestIntReg - 1;
  1047. IntReg >= (unsigned)MVT::i1; --IntReg) {
  1048. MVT IVT = (MVT::SimpleValueType)IntReg;
  1049. if (isTypeLegal(IVT)) {
  1050. LegalIntReg = IntReg;
  1051. } else {
  1052. RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
  1053. (MVT::SimpleValueType)LegalIntReg;
  1054. ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
  1055. }
  1056. }
  1057. // ppcf128 type is really two f64's.
  1058. if (!isTypeLegal(MVT::ppcf128)) {
  1059. if (isTypeLegal(MVT::f64)) {
  1060. NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
  1061. RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
  1062. TransformToType[MVT::ppcf128] = MVT::f64;
  1063. ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
  1064. } else {
  1065. NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
  1066. RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
  1067. TransformToType[MVT::ppcf128] = MVT::i128;
  1068. ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
  1069. }
  1070. }
  1071. // Decide how to handle f128. If the target does not have native f128 support,
  1072. // expand it to i128 and we will be generating soft float library calls.
  1073. if (!isTypeLegal(MVT::f128)) {
  1074. NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
  1075. RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
  1076. TransformToType[MVT::f128] = MVT::i128;
  1077. ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  1078. }
  1079. // Decide how to handle f64. If the target does not have native f64 support,
  1080. // expand it to i64 and we will be generating soft float library calls.
  1081. if (!isTypeLegal(MVT::f64)) {
  1082. NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
  1083. RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
  1084. TransformToType[MVT::f64] = MVT::i64;
  1085. ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  1086. }
  1087. // Decide how to handle f32. If the target does not have native f32 support,
  1088. // expand it to i32 and we will be generating soft float library calls.
  1089. if (!isTypeLegal(MVT::f32)) {
  1090. NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
  1091. RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
  1092. TransformToType[MVT::f32] = MVT::i32;
  1093. ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  1094. }
  1095. // Decide how to handle f16. If the target does not have native f16 support,
  1096. // promote it to f32, because there are no f16 library calls (except for
  1097. // conversions).
  1098. if (!isTypeLegal(MVT::f16)) {
  1099. NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
  1100. RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
  1101. TransformToType[MVT::f16] = MVT::f32;
  1102. ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
  1103. }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger:
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeWidenVector:
      // Try to widen the vector.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        if (SVT.getVectorElementType() == EltVT &&
            SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2.  The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386 the representative class for
  // i8, i16, and i32 would be GR32; on x86_64 it is GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}
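
// For illustration: on a hypothetical target where v4i32 and v4f32 are the
// widest legal vector types, the loop above would record, for example:
//   v4i16 -> promote elements to v4i32      (TypePromoteInteger)
//   v2f32 -> widen to v4f32                 (TypeWidenVector)
//   v8f32 -> split into 2 x v4f32           (TypeSplitVector; TransformToType
//                                            is MVT::Other, NumRegisters = 2)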

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
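
// Worked example (hypothetical SSE-like target where v4f32 is the widest legal
// vector type; TLI and Ctx are a TargetLowering reference and LLVMContext):
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs =
//       TLI.getVectorTypeBreakdown(Ctx, EVT(MVT::v8f32), IntermediateVT,
//                                  NumIntermediates, RegisterVT);
//   // NumRegs == 2, NumIntermediates == 2,
//   // IntermediateVT == v4f32, RegisterVT == v4f32.
//
// On a target with no legal vector types at all, the same call would end with
// a scalar breakdown: IntermediateVT == f32, RegisterVT == f32, NumRegs == 8.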

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function.  This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}
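
// For illustration, a function declared as
//
//   define signext i8 @f()
//
// on a typical 32-bit target produces a single part: the i8 return value is
// first raised to the i32 minimum required for an extended return, and one
// OutputArg with PartVT == i32 and the SExt flag set is pushed onto Outs.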

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlignment(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
                            MMO.getAlignment(), Fast);
}
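
// For illustration, with a typical DataLayout where i32 is 4-byte aligned: an
// i32 access with alignment 4 returns true and reports *Fast == true, while a
// 1-byte-aligned i32 access is forwarded to the target's
// allowsMisalignedMemoryAccesses hook, which decides both legality and speed.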

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}
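
// Typical use (e.g. in a cost model): map an IR opcode to the corresponding
// ISD node before asking about legality; a zero result means there is no
// one-to-one SelectionDAG counterpart.
//
//   int ISDOpc = TLI.InstructionOpcodeToISD(Instruction::Add);  // ISD::ADD
//   if (ISDOpc && TLI.isOperationLegalOrCustom(ISDOpc, MVT::i32)) {
//     // Treat the operation as cheap on this target.
//   }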

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
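
// Worked example (hypothetical target where v4i32 is the widest legal vector
// type): legalizing <16 x i32> splits twice, v16i32 -> v8i32 -> v4i32, so
//
//   Type *V16I32 = VectorType::get(Type::getInt32Ty(Ctx), 16);
//   auto LT = TLI.getTypeLegalizationCost(DL, V16I32);
//   // LT.first == 4 (the cost doubles per split), LT.second == MVT::v4i32.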

Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name.  Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}
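
// For illustration, the two paths above surface in the IR as either a TLS
// global (non-Android default):
//
//   @__safestack_unsafe_stack_ptr = external thread_local(initialexec) global i8*
//
// or, on Android, a call returning the address of the per-thread slot:
//
//   %unsafe_stack_ptr = call i8** @__safestack_pointer_address()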

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation supports a conservative RISC-like r+r and r+i
  // addressing mode.

  // Only allow offsets that fit in a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r
    return false;
  }

  return true;
}
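
// For illustration, the default hook accepts and rejects modes as follows
// (each line describes one AddrMode configuration):
//
//   HasBaseReg = true,  BaseOffs = 32,      Scale = 0   // r+i   -> legal
//   HasBaseReg = true,  BaseOffs = 0,       Scale = 1   // r+r   -> legal
//   HasBaseReg = false, BaseOffs = 0,       Scale = 2   // 2*r   -> legal
//   HasBaseReg = true,  BaseOffs = 8,       Scale = 1   // r+r+i -> rejected
//   HasBaseReg = false, BaseOffs = 1 << 20, Scale = 0   // offset too wide -> rejected
//
// Targets with richer addressing (e.g. X86's base + scale*index + disp forms)
// override this hook to accept more shapes.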

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertGlobal("__guard_local", PtrTy);
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard"))
    new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                       GlobalVariable::ExternalLinkage,
                       nullptr, "__stack_chk_guard");
}
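
// For illustration, the declarations these hooks introduce look like:
//
//   @__guard_local = external global i8*       ; OpenBSD path
//   @__stack_chk_guard = external global i8*   ; default path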

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}
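
// For example:
//   getReciprocalOpName(/*IsSqrt=*/true,  MVT::v4f32) == "vec-sqrtf"
//   getReciprocalOpName(/*IsSqrt=*/false, MVT::f64)   == "divd"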

/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}
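
// For example, parsing "vec-divf:2" finds the ':' at Position == 8 and sets
// Value to 2; "sqrtd" has no step token and returns false; a malformed suffix
// such as "divf:12" is a fatal error.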

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';
  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
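
// For example, with a function attribute "reciprocal-estimates"="!divf,vec-sqrt:1":
//   getOpEnabled(false, MVT::f32, ...)   == Disabled    (matches "!divf")
//   getOpEnabled(true,  MVT::v4f32, ...) == Enabled     (matches "vec-sqrt",
//                                                        the no-size-suffix form)
//   getOpEnabled(true,  MVT::f64, ...)   == Unspecified (no matching token)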

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}