//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

namespace llvm {
TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc) {
  bool isLocal = GV->hasLocalLinkage();
  bool isDeclaration = GV->isDeclaration();
  // FIXME: what should we do for protected and internal visibility?
  // For variables, is internal different from hidden?
  bool isHidden = GV->hasHiddenVisibility();

  if (reloc == Reloc::PIC_) {
    if (isLocal || isHidden)
      return TLSModel::LocalDynamic;
    else
      return TLSModel::GeneralDynamic;
  } else {
    if (!isDeclaration || isHidden)
      return TLSModel::LocalExec;
    else
      return TLSModel::InitialExec;
  }
}
}
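// Summary of the decision table implemented above (derived directly from the
// code; not an addition to its logic):
//   PIC_    + (local || hidden)        --> LocalDynamic
//   PIC_    otherwise                  --> GeneralDynamic
//   non-PIC + (defined || hidden)      --> LocalExec
//   non-PIC otherwise (extern decl)    --> InitialExec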
/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I16] = "__ashlhi3";
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I16] = "__lshrhi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I16] = "__ashrhi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I16] = "__mulhi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::SDIV_I16] = "__divhi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I16] = "__udivhi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I16] = "__modhi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I16] = "__umodhi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::LOG_F32] = "logf";
  Names[RTLIB::LOG_F64] = "log";
  Names[RTLIB::LOG_F80] = "logl";
  Names[RTLIB::LOG_PPCF128] = "logl";
  Names[RTLIB::LOG2_F32] = "log2f";
  Names[RTLIB::LOG2_F64] = "log2";
  Names[RTLIB::LOG2_F80] = "log2l";
  Names[RTLIB::LOG2_PPCF128] = "log2l";
  Names[RTLIB::LOG10_F32] = "log10f";
  Names[RTLIB::LOG10_F64] = "log10";
  Names[RTLIB::LOG10_F80] = "log10l";
  Names[RTLIB::LOG10_PPCF128] = "log10l";
  Names[RTLIB::EXP_F32] = "expf";
  Names[RTLIB::EXP_F64] = "exp";
  Names[RTLIB::EXP_F80] = "expl";
  Names[RTLIB::EXP_PPCF128] = "expl";
  Names[RTLIB::EXP2_F32] = "exp2f";
  Names[RTLIB::EXP2_F64] = "exp2";
  Names[RTLIB::EXP2_F80] = "exp2l";
  Names[RTLIB::EXP2_PPCF128] = "exp2l";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::CEIL_F32] = "ceilf";
  Names[RTLIB::CEIL_F64] = "ceil";
  Names[RTLIB::CEIL_F80] = "ceill";
  Names[RTLIB::CEIL_PPCF128] = "ceill";
  Names[RTLIB::TRUNC_F32] = "truncf";
  Names[RTLIB::TRUNC_F64] = "trunc";
  Names[RTLIB::TRUNC_F80] = "truncl";
  Names[RTLIB::TRUNC_PPCF128] = "truncl";
  Names[RTLIB::RINT_F32] = "rintf";
  Names[RTLIB::RINT_F64] = "rint";
  Names[RTLIB::RINT_F80] = "rintl";
  Names[RTLIB::RINT_PPCF128] = "rintl";
  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
  Names[RTLIB::FLOOR_F32] = "floorf";
  Names[RTLIB::FLOOR_F64] = "floor";
  Names[RTLIB::FLOOR_F80] = "floorl";
  Names[RTLIB::FLOOR_PPCF128] = "floorl";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
  Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfi8";
  Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfi16";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfi8";
  Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfi16";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
}
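// A note on the naming scheme above (standard libgcc conventions, stated here
// for reference): integer suffixes are "hi" (i16), "si" (i32), "di" (i64),
// and "ti" (i128); floating-point suffixes are "sf" (f32), "df" (f64),
// "xf" (f80), and "tf" (128-bit float). The trailing digit conventionally
// counts the operands including the result, e.g. unary "__negdi2" vs.
// binary "__ashldi3".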
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) {
  if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i8)
      return FPTOSINT_F32_I8;
    if (RetVT == MVT::i16)
      return FPTOSINT_F32_I16;
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i8)
      return FPTOUINT_F32_I8;
    if (RetVT == MVT::i16)
      return FPTOUINT_F32_I16;
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
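// Example of how these lookups compose with the name table above (derived
// directly from the definitions in this file):
//   RTLIB::getFPTOSINT(MVT::f64, MVT::i32) == RTLIB::FPTOSINT_F64_I32,
// which InitLibcallNames maps to "__fixdfsi".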
/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}
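// How these CCs are consumed (an illustrative sketch, not code from this
// file): each comparison libcall returns an integer that is then compared
// against zero using the condition code registered above. For example,
// __eqsf2(a, b) returns zero iff a == b, so an OEQ_F32 result Res would be
// tested roughly as:
//   DAG.getSetCC(dl, SetCCVT, Res, DAG.getConstant(0, RetVT), ISD::SETEQ)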
TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(ConvertActions, 0, sizeof(ConvertActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));

  // Set default actions for various operations.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
    }

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use
  // addLegalFPImmediate to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);

  // These library functions default to expand.
  setOperationAction(ISD::FLOG ,  MVT::f64, Expand);
  setOperationAction(ISD::FLOG2,  MVT::f64, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
  setOperationAction(ISD::FEXP ,  MVT::f64, Expand);
  setOperationAction(ISD::FEXP2,  MVT::f64, Expand);
  setOperationAction(ISD::FLOG ,  MVT::f32, Expand);
  setOperationAction(ISD::FLOG2,  MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP ,  MVT::f32, Expand);
  setOperationAction(ISD::FEXP2,  MVT::f32, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  benefitFromCodePlacementOpt = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  BooleanContents = UndefinedBooleanContent;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  IfCvtBlockSizeLimit = 2;
  IfCvtDupBlockSizeLimit = 0;
  PrefLoopAlignment = 0;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);

  // Tell Legalize whether the assembler supports DEBUG_LOC.
  const TargetAsmInfo *TASM = TM.getTargetAsmInfo();
  if (!TASM || !TASM->hasDotLocAndDotFile())
    setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}
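// A target customizes these defaults from its own TargetLowering subclass
// constructor. A minimal sketch (the "MyTargetLowering" and "MyTarget" names
// are hypothetical, not part of this file):
//
//   MyTargetLowering::MyTargetLowering(TargetMachine &TM)
//     : TargetLowering(TM) {
//     addRegisterClass(MVT::i32, MyTarget::GPRRegisterClass);
//     setOperationAction(ISD::SELECT, MVT::i32, Expand); // no native select
//     computeRegisterProperties();
//   }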
TargetLowering::~TargetLowering() {}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
    MVT EVT = (MVT::SimpleValueType)ExpandedReg;
    if (!EVT.isInteger())
      break;
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction(EVT, Expand);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, Promote);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, Expand);
  }

  // Decide how to handle f32. If the target does not have native support for
  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, Promote);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, Expand);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (!isTypeLegal(VT)) {
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] =
        getVectorTypeBreakdown(VT,
                               IntermediateVT, NumIntermediates,
                               RegisterVT);
      RegisterTypeForVT[i] = RegisterVT;

      // Determine if there is a legal wider type.
      bool IsLegalWiderType = false;
      MVT EltVT = VT.getVectorElementType();
      unsigned NElts = VT.getVectorNumElements();
      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT &&
            SVT.getVectorNumElements() > NElts) {
          TransformToType[i] = SVT;
          ValueTypeActions.setTypeAction(VT, Promote);
          IsLegalWiderType = true;
          break;
        }
      }
      if (!IsLegalWiderType) {
        MVT NVT = VT.getPow2VectorType();
        if (NVT == VT) {
          // Type is already a power of 2.  The default action is to split.
          TransformToType[i] = MVT::Other;
          ValueTypeActions.setTypeAction(VT, Expand);
        } else {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, Promote);
        }
      }
    }
  }
}
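// Worked example (illustrative, assuming a 32-bit target whose largest legal
// integer type is i32): the expansion loop marks i64 Expand with
// NumRegistersForVT[i64] = 2 and TransformToType[i64] = i32, and i128 doubles
// that again; the promotion loop marks i1/i8/i16 Promote with
// TransformToType = i32.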
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}

MVT TargetLowering::getSetCCResultType(MVT VT) const {
  return getValueType(TD->getIntPtrType());
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
                                                MVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (DestVT.bitsLT(NewVT)) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return 1;
}
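// Worked example (derived from the algorithm above): on a target where v4f32
// is legal but v8f32 is not, VT = v8f32 halves once in the loop, giving
// IntermediateVT = v4f32, NumIntermediates = 2, RegisterVT = v4f32, and a
// return value of 2. For a non-power-of-2 type such as v7i8, the FIXME path
// scalarizes instead: NumVectorRegs = 7 and NumElts = 1.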
/// getWidenVectorType: given a vector type, returns the type to widen to
/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns MVT::Other.
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.
MVT TargetLowering::getWidenVectorType(MVT VT) const {
  assert(VT.isVector());
  if (isTypeLegal(VT))
    return VT;

  // Default is not to widen until moved to LegalizeTypes.
  return MVT::Other;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy());
  return Table;
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // Assume that everything is safe in static mode.
  if (getTargetMachine().getRelocationModel() == Reloc::Static)
    return true;

  // In dynamic-no-pic mode, assume that known defined values are safe.
  if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
      GA &&
      !GA->getGlobal()->isDeclaration() &&
      !GA->getGlobal()->isWeakForLinker())
    return true;

  // Otherwise assume nothing is safe.
  return false;
}
//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
                                                        const APInt &Demanded) {
  DebugLoc dl = Op.getDebugLoc();
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!C) return false;

    if (Op.getOpcode() == ISD::XOR &&
        (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
      return false;

    // If the constant has bits set outside the demanded set, shrink it to
    // just the demanded bits.
    if (C->getAPIntValue().intersects(~Demanded)) {
      MVT VT = Op.getValueType();
      SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
                                DAG.getConstant(Demanded &
                                                C->getAPIntValue(),
                                                VT));
      return CombineTo(Op, New);
    }

    break;
  }
  }

  return false;
}
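// Worked example (derived from the code above): with Demanded = 0x000000FF
// and Op = (x & 0x00FF00FF), the constant intersects ~Demanded, so the AND
// is rebuilt with the shrunken constant Demanded & C = 0x000000FF.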
/// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
/// casts are free.  This uses isZExtFree and ZERO_EXTEND for the widening
/// cast, but it could be generalized for targets with other types of
/// implicit widening casts.
bool
TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
                                                    unsigned BitWidth,
                                                    const APInt &Demanded,
                                                    DebugLoc dl) {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned SmallVTBits = BitWidth - Demanded.countLeadingZeros();
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    MVT SmallVT = MVT::getIntegerVT(SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(0)),
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(1)));
      SDValue Z = DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), X);
      return CombineTo(Op, Z);
    }
  }
  return false;
}
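// Worked example (illustrative): for an i32 ADD whose users only demand the
// low 8 bits (so SmallVTBits rounds up to 8), on a target where i32->i8
// truncation and i8->i32 zero-extension are both free, the node is rewritten
// as:
//   (zero_extend i32 (add i8 (truncate i8 x), (truncate i8 y)))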
/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New (on the
/// TargetLoweringOpt).  Otherwise, analyze the expression and return a mask of
/// KnownOne and KnownZero bits for the expression (used to simplify the
/// caller).  The KnownZero/One bits may only be accurate for those bits in the
/// DemandedMask.
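// Typical usage from a target's DAG-combine hook (a minimal sketch assuming
// the DAGCombinerInfo interface of this LLVM version; details may differ):
//
//   TargetLowering::TargetLoweringOpt TLO(DCI.DAG);
//   APInt KnownZero, KnownOne;
//   if (TLI.SimplifyDemandedBits(Op, APInt::getLowBitsSet(BitWidth, 16),
//                                KnownZero, KnownOne, TLO))
//     DCI.CommitTargetLoweringOpt(TLO);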
  787. bool TargetLowering::SimplifyDemandedBits(SDValue Op,
  788. const APInt &DemandedMask,
  789. APInt &KnownZero,
  790. APInt &KnownOne,
  791. TargetLoweringOpt &TLO,
  792. unsigned Depth) const {
  793. unsigned BitWidth = DemandedMask.getBitWidth();
  794. assert(Op.getValueSizeInBits() == BitWidth &&
  795. "Mask size mismatches value type size!");
  796. APInt NewMask = DemandedMask;
  797. DebugLoc dl = Op.getDebugLoc();
  798. // Don't know anything.
  799. KnownZero = KnownOne = APInt(BitWidth, 0);
  800. // Other users may use these bits.
  801. if (!Op.getNode()->hasOneUse()) {
  802. if (Depth != 0) {
  803. // If not at the root, Just compute the KnownZero/KnownOne bits to
  804. // simplify things downstream.
  805. TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
  806. return false;
  807. }
  808. // If this is the root being simplified, allow it to have multiple uses,
  809. // just set the NewMask to all bits.
  810. NewMask = APInt::getAllOnesValue(BitWidth);
  811. } else if (DemandedMask == 0) {
  812. // Not demanding any bits from Op.
  813. if (Op.getOpcode() != ISD::UNDEF)
  814. return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
  815. return false;
  816. } else if (Depth == 6) { // Limit search depth.
  817. return false;
  818. }
  819. APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  820. switch (Op.getOpcode()) {
  821. case ISD::Constant:
  822. // We know all of the bits for a constant!
  823. KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
  824. KnownZero = ~KnownOne & NewMask;
  825. return false; // Don't fall through, will infinitely loop.
  826. case ISD::AND:
  827. // If the RHS is a constant, check to see if the LHS would be zero without
  828. // using the bits from the RHS. Below, we use knowledge about the RHS to
  829. // simplify the LHS, here we're using information from the LHS to simplify
  830. // the RHS.
  831. if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  832. APInt LHSZero, LHSOne;
  833. TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
  834. LHSZero, LHSOne, Depth+1);
  835. // If the LHS already has zeros where RHSC does, this and is dead.
  836. if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
  837. return TLO.CombineTo(Op, Op.getOperand(0));
  838. // If any of the set bits in the RHS are known zero on the LHS, shrink
  839. // the constant.
  840. if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
  841. return true;
  842. }
  843. if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
  844. KnownOne, TLO, Depth+1))
  845. return true;
  846. assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  847. if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
  848. KnownZero2, KnownOne2, TLO, Depth+1))
  849. return true;
  850. assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  851. // If all of the demanded bits are known one on one side, return the other.
  852. // These bits cannot contribute to the result of the 'and'.
  853. if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
  854. return TLO.CombineTo(Op, Op.getOperand(0));
  855. if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
  856. return TLO.CombineTo(Op, Op.getOperand(1));
  857. // If all of the demanded bits in the inputs are known zeros, return zero.
  858. if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
  859. return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
  860. // If the RHS is a constant, see if we can simplify it.
  861. if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
  862. return true;
  863. // If the operation can be done in a smaller type, do so.
  864. if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
  865. return true;
  866. // Output known-1 bits are only known if set in both the LHS & RHS.
  867. KnownOne &= KnownOne2;
  868. // Output known-0 are known to be clear if zero in either the LHS | RHS.
  869. KnownZero |= KnownZero2;
  870. break;
  871. case ISD::OR:
  872. if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
  873. KnownOne, TLO, Depth+1))
  874. return true;
  875. assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  876. if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
  877. KnownZero2, KnownOne2, TLO, Depth+1))
  878. return true;
  879. assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  880. // If all of the demanded bits are known zero on one side, return the other.
  881. // These bits cannot contribute to the result of the 'or'.
  882. if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
  883. return TLO.CombineTo(Op, Op.getOperand(0));
  884. if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
  885. return TLO.CombineTo(Op, Op.getOperand(1));
  886. // If all of the potentially set bits on one side are known to be set on
  887. // the other side, just use the 'other' side.
  888. if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
  889. return TLO.CombineTo(Op, Op.getOperand(0));
  890. if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
  891. return TLO.CombineTo(Op, Op.getOperand(1));
  892. // If the RHS is a constant, see if we can simplify it.
  893. if (TLO.ShrinkDemandedConstant(Op, NewMask))
  894. return true;
  895. // If the operation can be done in a smaller type, do so.
  896. if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
  897. return true;
  898. // Output known-0 bits are only known if clear in both the LHS & RHS.
  899. KnownZero &= KnownZero2;
  900. // Output known-1 are known to be set if set in either the LHS | RHS.
  901. KnownOne |= KnownOne2;
  902. break;
  903. case ISD::XOR:
  904. if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
  905. KnownOne, TLO, Depth+1))
  906. return true;
  907. assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  908. if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
  909. KnownOne2, TLO, Depth+1))
  910. return true;
  911. assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  912. // If all of the demanded bits are known zero on one side, return the other.
  913. // These bits cannot contribute to the result of the 'xor'.
  914. if ((KnownZero & NewMask) == NewMask)
  915. return TLO.CombineTo(Op, Op.getOperand(0));
  916. if ((KnownZero2 & NewMask) == NewMask)
  917. return TLO.CombineTo(Op, Op.getOperand(1));
  918. // If the operation can be done in a smaller type, do so.
  919. if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
  920. return true;
  921. // If all of the unknown bits are known to be zero on one side or the other
  922. // (but not both) turn this into an *inclusive* or.
  923. // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
  924. if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
  925. return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
  926. Op.getOperand(0),
  927. Op.getOperand(1)));
  928. // Output known-0 bits are known if clear or set in both the LHS & RHS.
  929. KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
  930. // Output known-1 are known to be set if set in only one of the LHS, RHS.
  931. KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
  932. // If all of the demanded bits on one side are known, and all of the set
  933. // bits on that side are also known to be set on the other side, turn this
  934. // into an AND, as we know the bits will be cleared.
  935. // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
  936. if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
  937. if ((KnownOne & KnownOne2) == KnownOne) {
  938. MVT VT = Op.getValueType();
  939. SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
  940. return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
  941. Op.getOperand(0), ANDC));
  942. }
  943. }
  944. // If the RHS is a constant, see if we can simplify it.
  945. // for XOR, we prefer to force bits to 1 if they will make a -1.
  946. // if we can't force bits, try to shrink constant
  947. if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  948. APInt Expanded = C->getAPIntValue() | (~NewMask);
  949. // if we can expand it to have all bits set, do it
  950. if (Expanded.isAllOnesValue()) {
  951. if (Expanded != C->getAPIntValue()) {
  952. MVT VT = Op.getValueType();
  953. SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
  954. TLO.DAG.getConstant(Expanded, VT));
  955. return TLO.CombineTo(Op, New);
  956. }
  957. // if it already has all the bits set, nothing to change
  958. // but don't shrink either!
  959. } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
  960. return true;
  961. }
  962. }
  963. KnownZero = KnownZeroOut;
  964. KnownOne = KnownOneOut;
  965. break;
  966. case ISD::SELECT:
  967. if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
  968. KnownOne, TLO, Depth+1))
  969. return true;
  970. if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
  971. KnownOne2, TLO, Depth+1))
  972. return true;
  973. assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  974. assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  975. // If the operands are constants, see if we can simplify them.
  976. if (TLO.ShrinkDemandedConstant(Op, NewMask))
  977. return true;
  978. // Only known if known in both the LHS and RHS.
  979. KnownOne &= KnownOne2;
  980. KnownZero &= KnownZero2;
  981. break;
  982. case ISD::SELECT_CC:
  983. if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
  984. KnownOne, TLO, Depth+1))
  985. return true;
  986. if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
  987. KnownOne2, TLO, Depth+1))
  988. return true;
  989. assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  990. assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  991. // If the operands are constants, see if we can simplify them.
  992. if (TLO.ShrinkDemandedConstant(Op, NewMask))
  993. return true;
  994. // Only known if known in both the LHS and RHS.
  995. KnownOne &= KnownOne2;
  996. KnownZero &= KnownZero2;
  997. break;
  998. case ISD::SHL:
  999. if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  1000. unsigned ShAmt = SA->getZExtValue();
  1001. SDValue InOp = Op.getOperand(0);
  1002. // If the shift count is an invalid immediate, don't do anything.
  1003. if (ShAmt >= BitWidth)
  1004. break;
  1005. // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
  1006. // single shift. We can do this if the bottom bits (which are shifted
  1007. // out) are never demanded.
  1008. if (InOp.getOpcode() == ISD::SRL &&
  1009. isa<ConstantSDNode>(InOp.getOperand(1))) {
  1010. if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
  1011. unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
  1012. unsigned Opc = ISD::SHL;
  1013. int Diff = ShAmt-C1;
  1014. if (Diff < 0) {
  1015. Diff = -Diff;
  1016. Opc = ISD::SRL;
  1017. }
  1018. SDValue NewSA =
  1019. TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
  1020. MVT VT = Op.getValueType();
  1021. return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
  1022. InOp.getOperand(0), NewSA));
  1023. }
  1024. }
  1025. if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt),
  1026. KnownZero, KnownOne, TLO, Depth+1))
  1027. return true;
  1028. KnownZero <<= SA->getZExtValue();
  1029. KnownOne <<= SA->getZExtValue();
  1030. // low bits known zero.
  1031. KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
  1032. }
  1033. break;
  1034. case ISD::SRL:
  1035. if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  1036. MVT VT = Op.getValueType();
  1037. unsigned ShAmt = SA->getZExtValue();
  1038. unsigned VTSize = VT.getSizeInBits();
  1039. SDValue InOp = Op.getOperand(0);
  1040. // If the shift count is an invalid immediate, don't do anything.
  1041. if (ShAmt >= BitWidth)
  1042. break;
  1043. // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
  1044. // single shift. We can do this if the top bits (which are shifted out)
  1045. // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, (NewMask << ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable.  The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
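    // e.g. if only bit 0 of (X >>s Y) is demanded, (X >>u Y) produces the
    // same low bit, so the sra can be replaced unconditionally.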
    if (DemandedMask == 1)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      APInt InDemandedMask = (NewMask << ShAmt);

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      if (HighBits.intersects(NewMask))
        InDemandedMask |= APInt::getSignBit(VT.getSizeInBits());

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bit, adjusted to where it is now in the mask.
      APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
                                                 Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne.intersects(SignBit)) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
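    // e.g. for (sext_inreg X, i8) at i32, any demanded bits 8..31 are copies
    // of bit 7 of the input, so demanding them means demanding that sign bit.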
    APInt NewBits = APInt::getHighBitsSet(BitWidth,
                                          BitWidth - EVT.getSizeInBits()) &
                    NewMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits());
    InSignBit.zext(BitWidth);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
                                                   EVT.getSizeInBits()) &
                              NewMask;

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT));

    if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                 // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
    APInt InMask = NewMask;
    InMask.trunc(OperandBitWidth);

    // If none of the top bits are demanded, convert this into an any_extend.
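    // e.g. for (zext X:i8 to i32) where only bits 0..7 are demanded, nothing
    // depends on the zeroed high bits, so an any_extend is equally good.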
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
    if (!NewBits.intersects(NewMask))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getSizeInBits();
    APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
    APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
    APInt NewBits   = ~InMask & NewMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
                                              Op.getValueType(),
                                              Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    APInt InDemandedBits = InMask & NewMask;
    InDemandedBits |= InSignBit;
    InDemandedBits.trunc(InBits);

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne.intersects(InSignBit)) {
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne  &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
    APInt InMask = NewMask;
    InMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    APInt TruncMask = NewMask;
    TruncMask.zext(Op.getOperand(0).getValueSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    KnownZero.trunc(BitWidth);
    KnownOne.trunc(BitWidth);

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Op.getOperand(0).getNode()->hasOneUse()) {
      SDValue In = Op.getOperand(0);
      unsigned InBitWidth = In.getValueSizeInBits();
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
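        // e.g. (trunc (srl X:i64, 8) to i32) with NewMask = 0x00FFFFFF: the
        // shifted-in bits survive only as bits 24..31 of the result, which are
        // not demanded, so (srl (trunc X), 8) is equivalent.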
        if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
          APInt HighBits = APInt::getHighBitsSet(InBitWidth,
                                                 InBitWidth - BitWidth);
          HighBits = HighBits.lshr(ShAmt->getZExtValue());
          HighBits.trunc(BitWidth);

          if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
            // None of the shifted in bits are needed.  Add a truncate of the
            // shift input, then shift it.
            SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
                                               Op.getValueType(),
                                               In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
                                                     Op.getValueType(),
                                                     NewTrunc,
                                                     In.getOperand(1)));
          }
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth,
                                        VT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & NewMask;
    break;
  }
  case ISD::BIT_CONVERT:
#if 0
    // If this is an FP->Int bitcast and if the sign bit is the only thing that
    // is demanded, turn this into a FGETSIGN.
    if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) &&
        MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
        !MVT::isVector(Op.getOperand(0).getValueType())) {
      // Only do this xform if FGETSIGN is valid or if before legalize.
      if (!TLO.AfterLegalize ||
          isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place.  We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
                                       Op.getOperand(0));
        unsigned ShVal = Op.getValueType().getSizeInBits()-1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
                                                 Sign, ShAmt));
      }
    }
#endif
    break;
  case ISD::ADD:
  case ISD::MUL:
  case ISD::SUB: {
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
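    // e.g. if NewMask only demands the low 8 bits of an i32 add, the operands'
    // bits 8..31 cannot influence the result: carries propagate only upward.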
    APInt LoMask = APInt::getLowBitsSet(BitWidth,
                                        BitWidth - NewMask.countLeadingZeros());
    if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    // See if the operation should be performed at a smaller bit width.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;
  }
  // FALL THROUGH
  default:
    // Just use ComputeMaskedBits to compute output bits.
    TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((NewMask & (KnownZero|KnownOne)) == NewMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                    const APInt &Mask,
                                                    APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const SelectionDAG &DAG,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
}

/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}
/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly
/// one bit set.  This differs from ComputeMaskedBits in that it doesn't need to
/// determine which bit is set.
///
static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
  // A left-shift of a constant one will have exactly one bit set, because
  // shifting the bit off the end is undefined.
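  // e.g. (shl 1, X) always yields a power of two for any in-range X.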
  if (Val.getOpcode() == ISD::SHL)
    if (ConstantSDNode *C =
         dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
      if (C->getAPIntValue() == 1)
        return true;

  // Similarly, a right-shift of a constant sign-bit will have exactly
  // one bit set.
  if (Val.getOpcode() == ISD::SRL)
    if (ConstantSDNode *C =
         dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
      if (C->getAPIntValue().isSignBit())
        return true;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to ComputeMaskedBits to catch other known cases.
  MVT OpVT = Val.getValueType();
  unsigned BitWidth = OpVT.getSizeInBits();
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne);
  return (KnownZero.countPopulation() == BitWidth - 1) &&
         (KnownOne.countPopulation() == 1);
}
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc.  If it is unable to simplify it, return a null SDValue.
SDValue
TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI, DebugLoc dl) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return DAG.getConstant(1, VT);
  }

  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
    const APInt &C1 = N1C->getAPIntValue();
    if (isa<ConstantSDNode>(N0.getNode())) {
      return DAG.FoldSetCC(VT, N0, N1, Cond, dl);
    } else {
      // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
      // equality comparison, then we're just comparing whether X itself is
      // zero.
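      // For i32, ctlz(x) ranges over [0, 32] and only the value 32 (x == 0)
      // has bit 5 set, so (srl (ctlz x), 5) is a 0/1 test for x == 0.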
      if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
          N0.getOperand(0).getOpcode() == ISD::CTLZ &&
          N0.getOperand(1).getOpcode() == ISD::Constant) {
        unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
            ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
          if ((C1 == 0) == (Cond == ISD::SETEQ)) {
            // (srl (ctlz x), 5) == 0  -> X != 0
            // (srl (ctlz x), 5) != 1  -> X != 0
            Cond = ISD::SETNE;
          } else {
            // (srl (ctlz x), 5) != 0  -> X == 0
            // (srl (ctlz x), 5) == 1  -> X == 0
            Cond = ISD::SETEQ;
          }
          SDValue Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
                              Zero, Cond);
        }
      }
      // If the LHS is '(and load, const)', the RHS is 0,
      // the test is for equality or unsigned, and all 1 bits of the const are
      // in the same partial word, see if we can shorten the load.
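      // e.g. on a little-endian target, ((load i32 p) & 0xFF00) == 0 can be
      // rewritten as ((load i8 p+1) & 0xFF) == 0, using a narrower load.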
      if (DCI.isBeforeLegalize() &&
          N0.getOpcode() == ISD::AND && C1 == 0 &&
          N0.getNode()->hasOneUse() &&
          isa<LoadSDNode>(N0.getOperand(0)) &&
          N0.getOperand(0).getNode()->hasOneUse() &&
          isa<ConstantSDNode>(N0.getOperand(1))) {
        LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
        uint64_t bestMask = 0;
        unsigned bestWidth = 0, bestOffset = 0;
        if (!Lod->isVolatile() && Lod->isUnindexed() &&
            // FIXME: This uses getZExtValue() below so it only works on i64 and
            // below.
            N0.getValueType().getSizeInBits() <= 64) {
          unsigned origWidth = N0.getValueType().getSizeInBits();
          // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
          // 8 bits, but have to be careful...
          if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
            origWidth = Lod->getMemoryVT().getSizeInBits();
          uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
          for (unsigned width = origWidth / 2; width >= 8; width /= 2) {
            uint64_t newMask = (1ULL << width) - 1;
            for (unsigned offset = 0; offset < origWidth/width; offset++) {
              if ((newMask & Mask) == Mask) {
                if (!TD->isLittleEndian())
                  bestOffset = (origWidth/width - offset - 1) * (width/8);
                else
                  bestOffset = (uint64_t)offset * (width/8);
                bestMask = Mask >> (offset * (width/8) * 8);
                bestWidth = width;
                break;
              }
              newMask = newMask << width;
            }
          }
        }
        if (bestWidth) {
          MVT newVT = MVT::getIntegerVT(bestWidth);
          if (newVT.isRound()) {
            MVT PtrType = Lod->getOperand(1).getValueType();
            SDValue Ptr = Lod->getBasePtr();
            if (bestOffset != 0)
              Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
                                DAG.getConstant(bestOffset, PtrType));
            unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
            SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
                                          Lod->getSrcValue(),
                                          Lod->getSrcValueOffset() + bestOffset,
                                          false, NewAlign);
            return DAG.getSetCC(dl, VT,
                                DAG.getNode(ISD::AND, dl, newVT, NewLoad,
                                            DAG.getConstant(bestMask, newVT)),
                                DAG.getConstant(0LL, newVT), Cond);
          }
        }
      }
      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
      if (N0.getOpcode() == ISD::ZERO_EXTEND) {
        unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();

        // If the comparison constant has bits in the upper part, the
        // zero-extended value could never match.
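        // e.g. (zext X:i8 to i32) == 256 is always false, since the
        // zero-extended value can never exceed 255.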
        if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
                                                C1.getBitWidth() - InSize))) {
          switch (Cond) {
          case ISD::SETUGT:
          case ISD::SETUGE:
          case ISD::SETEQ: return DAG.getConstant(0, VT);
          case ISD::SETULT:
          case ISD::SETULE:
          case ISD::SETNE: return DAG.getConstant(1, VT);
          case ISD::SETGT:
          case ISD::SETGE:
            // True if the sign bit of C1 is set.
            return DAG.getConstant(C1.isNegative(), VT);
          case ISD::SETLT:
          case ISD::SETLE:
            // True if the sign bit of C1 isn't set.
            return DAG.getConstant(C1.isNonNegative(), VT);
          default:
            break;
          }
        }

        // Otherwise, we can perform the comparison with the low bits.
        switch (Cond) {
        case ISD::SETEQ:
        case ISD::SETNE:
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETULT:
        case ISD::SETULE:
          return DAG.getSetCC(dl, VT, N0.getOperand(0),
                              DAG.getConstant(APInt(C1).trunc(InSize),
                                              N0.getOperand(0).getValueType()),
                              Cond);
        default:
          break;   // todo, be more careful with signed comparisons
        }
      } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
        unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
        MVT ExtDstTy = N0.getValueType();
        unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();

        // If the extended part has any inconsistent bits, it cannot ever
        // compare equal.  In other words, they have to be all ones or all
        // zeros.
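        // e.g. (sext_inreg X, i8) at i32 can only equal constants whose bits
        // 8..31 are all zero or all one (such as 0x7F or 0xFFFFFF80); anything
        // else folds to a constant result below.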
        APInt ExtBits =
          APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits);
        if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
          return DAG.getConstant(Cond == ISD::SETNE, VT);

        SDValue ZextOp;
        MVT Op0Ty = N0.getOperand(0).getValueType();
        if (Op0Ty == ExtSrcTy) {
          ZextOp = N0.getOperand(0);
        } else {
          APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
          ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
                               DAG.getConstant(Imm, Op0Ty));
        }
        if (!DCI.isCalledByLegalizer())
          DCI.AddToWorklist(ZextOp.getNode());
        // Otherwise, make this a use of a zext.
        return DAG.getSetCC(dl, VT, ZextOp,
                            DAG.getConstant(C1 & APInt::getLowBitsSet(
                                                              ExtDstTyBits,
                                                              ExtSrcTyBits),
                                            ExtDstTy),
                            Cond);
      } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
        if (N0.getOpcode() == ISD::SETCC) {
          bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1);
          if (TrueWhenTrue)
            return N0;

          // Invert the condition.
          ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
          CC = ISD::getSetCCInverse(CC,
                                    N0.getOperand(0).getValueType().isInteger());
          return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
        }

        if ((N0.getOpcode() == ISD::XOR ||
             (N0.getOpcode() == ISD::AND &&
              N0.getOperand(0).getOpcode() == ISD::XOR &&
              N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
            isa<ConstantSDNode>(N0.getOperand(1)) &&
            cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
          // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
          // can only do this if the top bits are known zero.
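          // e.g. when X is known to be 0 or 1, (X^1) == 0 holds exactly when
          // X != 0, so flip the condition and drop the xor.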
          unsigned BitWidth = N0.getValueSizeInBits();
          if (DAG.MaskedValueIsZero(N0,
                                    APInt::getHighBitsSet(BitWidth,
                                                          BitWidth-1))) {
            // Okay, get the un-inverted input value.
            SDValue Val;
            if (N0.getOpcode() == ISD::XOR)
              Val = N0.getOperand(0);
            else {
              assert(N0.getOpcode() == ISD::AND &&
                     N0.getOperand(0).getOpcode() == ISD::XOR);
              // ((X^1)&1)^1 -> X & 1
              Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
                                N0.getOperand(0).getOperand(0),
                                N0.getOperand(1));
            }
            return DAG.getSetCC(dl, VT, Val, N1,
                                Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
          }
        }
      }
      APInt MinVal, MaxVal;
      unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
      if (ISD::isSignedIntSetCC(Cond)) {
        MinVal = APInt::getSignedMinValue(OperandBitSize);
        MaxVal = APInt::getSignedMaxValue(OperandBitSize);
      } else {
        MinVal = APInt::getMinValue(OperandBitSize);
        MaxVal = APInt::getMaxValue(OperandBitSize);
      }

      // Canonicalize GE/LE comparisons to use GT/LT comparisons.
      if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
        if (C1 == MinVal) return DAG.getConstant(1, VT);  // X >= MIN --> true
        // X >= C0 --> X > (C0-1)
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(C1-1, N1.getValueType()),
                            (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
      }

      if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
        if (C1 == MaxVal) return DAG.getConstant(1, VT);  // X <= MAX --> true
        // X <= C0 --> X < (C0+1)
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(C1+1, N1.getValueType()),
                            (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
      }

      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
        return DAG.getConstant(0, VT);      // X < MIN --> false
      if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
        return DAG.getConstant(1, VT);      // X >= MIN --> true
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
        return DAG.getConstant(0, VT);      // X > MAX --> false
      if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
        return DAG.getConstant(1, VT);      // X <= MAX --> true

      // Canonicalize setgt X, Min --> setne X, Min
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
        return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
      // Canonicalize setlt X, Max --> setne X, Max
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
        return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);

      // If we have setult X, 1, turn it into seteq X, 0
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(MinVal, N0.getValueType()),
                            ISD::SETEQ);
      // If we have setugt X, Max-1, turn it into seteq X, Max
      else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(MaxVal, N0.getValueType()),
                            ISD::SETEQ);

      // If we have "setcc X, C0", check to see if we can shrink the immediate
      // by changing cc.

      // SETUGT X, SINTMAX  -> SETLT X, 0
      if (Cond == ISD::SETUGT &&
          C1 == APInt::getSignedMaxValue(OperandBitSize))
        return DAG.getSetCC(dl, VT, N0,
                            DAG.getConstant(0, N1.getValueType()),
                            ISD::SETLT);

      // SETULT X, SINTMIN  -> SETGT X, -1
      if (Cond == ISD::SETULT &&
          C1 == APInt::getSignedMinValue(OperandBitSize)) {
        SDValue ConstMinusOne =
          DAG.getConstant(APInt::getAllOnesValue(OperandBitSize),
                          N1.getValueType());
        return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
      }

      // Fold bit comparisons when we can.
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
        if (ConstantSDNode *AndRHS =
                    dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          MVT ShiftTy = DCI.isBeforeLegalize() ?
            getPointerTy() : getShiftAmountTy();
          if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0  -->  (X & 8) >> 3
            // Perform the xform if the AND RHS is a single bit.
            if (isPowerOf2_64(AndRHS->getZExtValue())) {
              return DAG.getNode(ISD::SRL, dl, VT, N0,
                                 DAG.getConstant(Log2_64(AndRHS->getZExtValue()),
                                                 ShiftTy));
            }
          } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) {
            // (X & 8) == 8  -->  (X & 8) >> 3
            // Perform the xform if C1 is a single bit.
            if (C1.isPowerOf2()) {
              return DAG.getNode(ISD::SRL, dl, VT, N0,
                                 DAG.getConstant(C1.logBase2(), ShiftTy));
            }
          }
        }
    }
  } else if (isa<ConstantSDNode>(N0.getNode())) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }
  if (isa<ConstantFPSDNode>(N0.getNode())) {
    // Constant fold or commute setcc.
    SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
    if (O.getNode()) return O;
  } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    // If the RHS of an FP comparison is a constant, simplify it away in
    // some cases.
    if (CFP->getValueAPF().isNaN()) {
      // If an operand is known to be a NaN, we can fold it.
      switch (ISD::getUnorderedFlavor(Cond)) {
      default: LLVM_UNREACHABLE("Unknown flavor!");
      case 0:  // Known false.
        return DAG.getConstant(0, VT);
      case 1:  // Known true.
        return DAG.getConstant(1, VT);
      case 2:  // Undefined.
        return DAG.getUNDEF(VT);
      }
    }

    // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
    // constant if knowing that the operand is non-NaN is enough.  We prefer to
    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
    // materialize 0.0.
    if (Cond == ISD::SETO || Cond == ISD::SETUO)
      return DAG.getSetCC(dl, VT, N0, N0, Cond);
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (N0.getValueType().isInteger())
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(dl, VT, N0, N1, NewCond);
  }
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      N0.getValueType().isInteger()) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
                                Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
                                Cond);
        }
      }

      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
            return DAG.getSetCC(dl, VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getAPIntValue()-
                                                LHSR->getAPIntValue(),
                                                N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
              return
                DAG.getSetCC(dl, VT, N0.getOperand(0),
                             DAG.getConstant(LHSR->getAPIntValue() ^
                                               RHSC->getAPIntValue(),
                                             N0.getValueType()),
                             Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
            return
              DAG.getSetCC(dl, VT, N0.getOperand(1),
                           DAG.getConstant(SUBC->getAPIntValue() -
                                             RHSC->getAPIntValue(),
                                           N0.getValueType()),
                           Cond);
          }
        }
      }

      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(dl, VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(dl, VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else if (N0.getNode()->hasOneUse()) {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(),
                                   N1,
                                   DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
        }
      }
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(dl, VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(dl, VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else if (N1.getNode()->hasOneUse()) {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0,
                                   DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
        }
      }
    }

    // Simplify x&y == y to x&y != 0 if y has exactly one bit set.
    // Note that where y is variable and is known to have at most
    // one bit set (for example, if it is z&1) we cannot do this;
    // the expressions are not equivalent when y==0.
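    // e.g. with constant y = 8, (x&8) == 8 is the same as (x&8) != 0; with
    // y = z&1 the rewrite would be wrong when z&1 == 0.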
    if (N0.getOpcode() == ISD::AND)
      if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) {
        if (ValueHasExactlyOneBitSet(N1, DAG)) {
          Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
          SDValue Zero = DAG.getConstant(0, N1.getValueType());
          return DAG.getSetCC(dl, VT, N0, Zero, Cond);
        }
      }
    if (N1.getOpcode() == ISD::AND)
      if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) {
        if (ValueHasExactlyOneBitSet(N0, DAG)) {
          Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
          SDValue Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(dl, VT, N1, Zero, Cond);
        }
      }
  }
  // Fold away ALL boolean setcc's.
  SDValue Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: LLVM_UNREACHABLE("Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -->  ~(X^Y)
      Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
      N0 = DAG.getNOT(dl, Temp, MVT::i1);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  ~X & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  ~X & Y
      Temp = DAG.getNOT(dl, N0, MVT::i1);
      N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETLT:  // X <s Y  -->  X == 1 & Y == 0  -->  ~Y & X
    case ISD::SETUGT: // X >u Y  -->  X == 1 & Y == 0  -->  ~Y & X
      Temp = DAG.getNOT(dl, N1, MVT::i1);
      N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETULE: // X <=u Y  -->  X == 0 | Y == 1  -->  ~X | Y
    case ISD::SETGE:  // X >=s Y  -->  X == 0 | Y == 1  -->  ~X | Y
      Temp = DAG.getNOT(dl, N0, MVT::i1);
      N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETUGE: // X >=u Y  -->  X == 1 | Y == 0  -->  ~Y | X
    case ISD::SETLE:  // X <=s Y  -->  X == 1 | Y == 0  -->  ~Y | X
      Temp = DAG.getNOT(dl, N1, MVT::i1);
      N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.getNode());
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDValue();
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
                                    int64_t &Offset) const {
  if (isa<GlobalAddressSDNode>(N)) {
    GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
    GA = GASD->getGlobal();
    Offset += GASD->getOffset();
    return true;
  }

  if (N->getOpcode() == ISD::ADD) {
    SDValue N1 = N->getOperand(0);
    SDValue N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSExtValue();
        return true;
      }
    } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSExtValue();
        return true;
      }
    }
  }
  return false;
}
/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool TargetLowering::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                       unsigned Bytes, int Dist,
                                       const MachineFrameInfo *MFI) const {
  if (LD->getChain() != Base->getChain())
    return false;
  MVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }
  if (Loc.getOpcode() == ISD::ADD && Loc.getOperand(0) == BaseLoc) {
    ConstantSDNode *V = dyn_cast<ConstantSDNode>(Loc.getOperand(1));
    if (V && (V->getSExtValue() == Dist*Bytes))
      return true;
  }

  GlobalValue *GV1 = NULL;
  GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
SDValue TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm':    // memory
    case 'o':    // offsetable
    case 'V':    // not offsetable
      return C_Memory;
    case 'i':    // Simple Integer or Relocatable Constant
    case 'n':    // Simple Integer
    case 's':    // Relocatable Constant
    case 'X':    // Allow ANY value.
    case 'I':    // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Other;
    }
  }

  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const {
  if (ConstraintVT.isInteger())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "f";      // works for many targets
  return 0;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  char ConstraintLetter,
                                                  bool hasMemory,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {
  switch (ConstraintLetter) {
  default: break;
  case 'X':    // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock) {
      Ops.push_back(Op);
      return;
    }
    // fall through
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
    // is possible and fine if either GV or C are missing.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C == 0 || GA == 0) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (C == 0 || GA == 0)
        C = 0, GA = 0;
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) {   // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getZExtValue();
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
                                                 Op.getValueType(), Offs));
        return;
      }
    }
    if (C) {   // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's') {
        // gcc prints these as sign extended.  Sign extend value to 64 bits
        // now; without this it would get ZExt'd later in
        // ScheduleDAGSDNodes::EmitNode, which is very generic.
        Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
                                            MVT::i64));
        return;
      }
    }
    break;
  }
  }
}
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const TargetRegisterInfo *RI = TM.getRegisterInfo();
  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}
//===----------------------------------------------------------------------===//
// Constraint Selection.

/// isMatchingInputConstraint - Return true if this is an input operand that is
/// a matching constraint like "4".
bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return isdigit(ConstraintCode[0]);
}

/// getMatchedOperand - If this is an input matching constraint, this method
/// returns the output operand it matches.
unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return atoi(ConstraintCode.c_str());
}
/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  default: LLVM_UNREACHABLE("Unknown constraint type!");
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
}

/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory that use of the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory.  Because of
/// this the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             bool hasMemory, const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
      TLI.getConstraintType(OpInfo.Codes[i]);

    // If this is an 'other' constraint, see if the operand is valid for it.
    // For example, on X86 we might have an 'rI' constraint.  If the operand
    // is an integer in the range [0..31] we want to use I (saving a load
    // of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}
/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                            SDValue Op,
                                            bool hasMemory,
                                            SelectionDAG *DAG) const {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
  } else {
    ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Look through bitcasts over functions.  In the context of an asm
    // argument we don't care about bitcasting function types; the parameters
    // to the function, if any, will have been handled elsewhere.
    Value *v = OpInfo.CallOperandVal;
    ConstantExpr *CE = NULL;
    while ((CE = dyn_cast<ConstantExpr>(v)) &&
           CE->getOpcode() == Instruction::BitCast)
      v = CE->getOperand(0);
    if (!isa<Function>(v))
      v = OpInfo.CallOperandVal;

    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).  For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone
    // (minus any bitcasts).
    if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
      OpInfo.CallOperandVal = v;
      return;
    }

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
    }
  }
}
//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation of this implements a conservative RISCy, r+r and
  // r+i addr mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  }

  return true;
}
/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
  APInt::ms magics = d.magic();
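  // Illustrative only (constants per Hacker's Delight): for i32 division by 3,
  // magics.m is 0x55555556 with magics.s == 0, so the code below reduces to
  //   q = mulhs(n, 0x55555556);  q += (n >>u 31);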
  // Multiply the numerator (operand 0) by the magic value
  // FIXME: We should support doing a MUL in a wider type
  SDValue Q;
  if (isOperationLegalOrCustom(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhs or equivalent
  // If d > 0 and m < 0, add the numerator
  if (d.isStrictlyPositive() && magics.m.isNegative()) {
    Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d.isNegative() && magics.m.isStrictlyPositive()) {
    Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Shift right algebraic if shift value is nonzero
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, dl, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Extract the sign bit and add it to the quotient
  SDValue T =
    DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
                                                     getShiftAmountTy()));
  if (Created)
    Created->push_back(T.getNode());
  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // FIXME: We should use a narrower constant when the upper
  // bits are known to be zero.
  ConstantSDNode *N1C = cast<ConstantSDNode>(N->getOperand(1));
  APInt::mu magics = N1C->getAPIntValue().magicu();
  2342. // Multiply the numerator (operand 0) by the magic value
  2343. // FIXME: We should support doing a MUL in a wider type
  2344. SDValue Q;
  2345. if (isOperationLegalOrCustom(ISD::MULHU, VT))
  2346. Q = DAG.getNode(ISD::MULHU, dl, VT, N->getOperand(0),
  2347. DAG.getConstant(magics.m, VT));
  2348. else if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
  2349. Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT),
  2350. N->getOperand(0),
  2351. DAG.getConstant(magics.m, VT)).getNode(), 1);
  2352. else
  2353. return SDValue(); // No mulhu or equvialent
  2354. if (Created)
  2355. Created->push_back(Q.getNode());
  2356. if (magics.a == 0) {
  2357. assert(magics.s < N1C->getAPIntValue().getBitWidth() &&
  2358. "We shouldn't generate an undefined shift!");
  2359. return DAG.getNode(ISD::SRL, dl, VT, Q,
  2360. DAG.getConstant(magics.s, getShiftAmountTy()));
  2361. } else {
  2362. SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
  2363. if (Created)
  2364. Created->push_back(NPQ.getNode());
  2365. NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
  2366. DAG.getConstant(1, getShiftAmountTy()));
  2367. if (Created)
  2368. Created->push_back(NPQ.getNode());
  2369. NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
  2370. if (Created)
  2371. Created->push_back(NPQ.getNode());
  2372. return DAG.getNode(ISD::SRL, dl, VT, NPQ,
  2373. DAG.getConstant(magics.s-1, getShiftAmountTy()));
  2374. }
  2375. }
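
// Worked example for BuildUDIV (illustrative; the constants are the standard
// unsigned magic values from Hacker's Delight, assumed here to match what
// APInt::magicu() returns): dividing an i32 value n by 7 yields
// m = 0x24924925, s = 3 and a = 1, so the add-indicator path is taken:
//   Q   = MULHU(n, 0x24924925)
//   NPQ = SRL(SUB(n, Q), 1)    // (n - q) / 2, cannot overflow
//   NPQ = ADD(NPQ, Q)
//   result = SRL(NPQ, 2)       // final shift by s - 1
// For a divisor such as 9 (m = 0x38E38E39, s = 1, a = 0) the else branch is
// skipped and the result is simply SRL(Q, 1).
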
/// IgnoreHarmlessInstructions - Ignore instructions between a CALL and RET
/// node that don't prevent tail call optimization.
static SDValue IgnoreHarmlessInstructions(SDValue node) {
  // Found call return.
  if (node.getOpcode() == ISD::CALL) return node;
  // Ignore MERGE_VALUES. It will have at least one operand.
  if (node.getOpcode() == ISD::MERGE_VALUES)
    return IgnoreHarmlessInstructions(node.getOperand(0));
  // Ignore ANY_EXTEND nodes.
  if (node.getOpcode() == ISD::ANY_EXTEND)
    return IgnoreHarmlessInstructions(node.getOperand(0));
  // Ignore TRUNCATE nodes.
  if (node.getOpcode() == ISD::TRUNCATE)
    return IgnoreHarmlessInstructions(node.getOperand(0));
  // Any other node type.
  return node;
}
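
// For example (an illustrative DAG shape, not taken from real output), a
// caller returning a narrowed call result may produce
//   RET(chain, TRUNCATE(ANY_EXTEND(MERGE_VALUES(CALL, ...))))
// and IgnoreHarmlessInstructions peels off the TRUNCATE, ANY_EXTEND and
// MERGE_VALUES wrappers to recover the underlying CALL node.
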
bool TargetLowering::CheckTailCallReturnConstraints(CallSDNode *TheCall,
                                                    SDValue Ret) {
  unsigned NumOps = Ret.getNumOperands();
  // ISD::CALL results:  (value0, ..., valuen, chain)
  // ISD::RET  operands: (chain, value0, flag0, ..., valuen, flagn)
  //
  // Value return:
  // Check that operand 1 of the RET node sources from the CALL node. The RET
  // node has at least two operands: operand 0 holds the chain and operand 1
  // holds the value. We also need to check that there is no code in between
  // the call and the return; hence we check that the incoming chain of the
  // return sources from the outgoing chain of the call.
  if (NumOps > 1 &&
      IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0) &&
      Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
    return true;
  // Void return: the RET node has the chain result value of the CALL node as
  // input.
  if (NumOps == 1 &&
      Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
    return true;
  return false;
}
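
// Sketch of the two accepted shapes, assuming a call with a single result so
// that the CALL node produces (value0, chain):
//   value return: RET(CALL.getValue(1), CALL.getValue(0), flag0)
//   void return:  RET(CALL.getValue(1))
// Any other shape, e.g. a RET whose incoming chain does not come straight
// from the CALL, fails the check and disables tail call optimization.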