CodeGenFunction.cpp 73 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967
  1. //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This coordinates the per-function state used while generating code.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "CodeGenFunction.h"
  14. #include "CGBlocks.h"
  15. #include "CGCleanup.h"
  16. #include "CGCUDARuntime.h"
  17. #include "CGCXXABI.h"
  18. #include "CGDebugInfo.h"
  19. #include "CGOpenMPRuntime.h"
  20. #include "CodeGenModule.h"
  21. #include "CodeGenPGO.h"
  22. #include "TargetInfo.h"
  23. #include "clang/AST/ASTContext.h"
  24. #include "clang/AST/Decl.h"
  25. #include "clang/AST/DeclCXX.h"
  26. #include "clang/AST/StmtCXX.h"
  27. #include "clang/Basic/Builtins.h"
  28. #include "clang/Basic/TargetInfo.h"
  29. #include "clang/CodeGen/CGFunctionInfo.h"
  30. #include "clang/Frontend/CodeGenOptions.h"
  31. #include "clang/Sema/SemaDiagnostic.h"
  32. #include "llvm/IR/DataLayout.h"
  33. #include "llvm/IR/Intrinsics.h"
  34. #include "llvm/IR/MDBuilder.h"
  35. #include "llvm/IR/Operator.h"
  36. using namespace clang;
  37. using namespace CodeGen;
  38. CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
  39. : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
  40. Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
  41. CGBuilderInserterTy(this)),
  42. CurFn(nullptr), ReturnValue(Address::invalid()),
  43. CapturedStmtInfo(nullptr),
  44. SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
  45. CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
  46. IsOutlinedSEHHelper(false),
  47. BlockInfo(nullptr), BlockPointer(nullptr),
  48. LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
  49. NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
  50. ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
  51. DebugInfo(CGM.getModuleDebugInfo()),
  52. DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
  53. PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
  54. CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
  55. NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
  56. CXXABIThisValue(nullptr), CXXThisValue(nullptr),
  57. CXXStructorImplicitParamDecl(nullptr),
  58. CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
  59. CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
  60. TerminateHandler(nullptr), TrapBB(nullptr) {
  61. if (!suppressNewContext)
  62. CGM.getCXXABI().getMangleContext().startNewFunction();
  63. llvm::FastMathFlags FMF;
  64. if (CGM.getLangOpts().FastMath)
  65. FMF.setUnsafeAlgebra();
  66. if (CGM.getLangOpts().FiniteMathOnly) {
  67. FMF.setNoNaNs();
  68. FMF.setNoInfs();
  69. }
  70. if (CGM.getCodeGenOpts().NoNaNsFPMath) {
  71. FMF.setNoNaNs();
  72. }
  73. if (CGM.getCodeGenOpts().NoSignedZeros) {
  74. FMF.setNoSignedZeros();
  75. }
  76. if (CGM.getCodeGenOpts().ReciprocalMath) {
  77. FMF.setAllowReciprocal();
  78. }
  79. Builder.setFastMathFlags(FMF);
  80. }
  81. CodeGenFunction::~CodeGenFunction() {
  82. assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  83. // If there are any unclaimed block infos, go ahead and destroy them
  84. // now. This can happen if IR-gen gets clever and skips evaluating
  85. // something.
  86. if (FirstBlockInfo)
  87. destroyBlockInfos(FirstBlockInfo);
  88. if (getLangOpts().OpenMP) {
  89. CGM.getOpenMPRuntime().functionFinished(*this);
  90. }
  91. }
  92. CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
  93. AlignmentSource *Source) {
  94. return getNaturalTypeAlignment(T->getPointeeType(), Source,
  95. /*forPointee*/ true);
  96. }
  97. CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
  98. AlignmentSource *Source,
  99. bool forPointeeType) {
  100. // Honor alignment typedef attributes even on incomplete types.
  101. // We also honor them straight for C++ class types, even as pointees;
  102. // there's an expressivity gap here.
  103. if (auto TT = T->getAs<TypedefType>()) {
  104. if (auto Align = TT->getDecl()->getMaxAlignment()) {
  105. if (Source) *Source = AlignmentSource::AttributedType;
  106. return getContext().toCharUnitsFromBits(Align);
  107. }
  108. }
  109. if (Source) *Source = AlignmentSource::Type;
  110. CharUnits Alignment;
  111. if (T->isIncompleteType()) {
  112. Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  113. } else {
  114. // For C++ class pointees, we don't know whether we're pointing at a
  115. // base or a complete object, so we generally need to use the
  116. // non-virtual alignment.
  117. const CXXRecordDecl *RD;
  118. if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
  119. Alignment = CGM.getClassPointerAlignment(RD);
  120. } else {
  121. Alignment = getContext().getTypeAlignInChars(T);
  122. }
  123. // Cap to the global maximum type alignment unless the alignment
  124. // was somehow explicit on the type.
  125. if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
  126. if (Alignment.getQuantity() > MaxAlign &&
  127. !getContext().isAlignmentRequired(T))
  128. Alignment = CharUnits::fromQuantity(MaxAlign);
  129. }
  130. }
  131. return Alignment;
  132. }
  133. LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  134. AlignmentSource AlignSource;
  135. CharUnits Alignment = getNaturalTypeAlignment(T, &AlignSource);
  136. return LValue::MakeAddr(Address(V, Alignment), T, getContext(), AlignSource,
  137. CGM.getTBAAInfo(T));
  138. }
  139. /// Given a value of type T* that may not be to a complete object,
  140. /// construct an l-value with the natural pointee alignment of T.
  141. LValue
  142. CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  143. AlignmentSource AlignSource;
  144. CharUnits Align = getNaturalTypeAlignment(T, &AlignSource, /*pointee*/ true);
  145. return MakeAddrLValue(Address(V, Align), T, AlignSource);
  146. }
  147. llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  148. return CGM.getTypes().ConvertTypeForMem(T);
  149. }
  150. llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  151. return CGM.getTypes().ConvertType(T);
  152. }
  153. TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  154. type = type.getCanonicalType();
  155. while (true) {
  156. switch (type->getTypeClass()) {
  157. #define TYPE(name, parent)
  158. #define ABSTRACT_TYPE(name, parent)
  159. #define NON_CANONICAL_TYPE(name, parent) case Type::name:
  160. #define DEPENDENT_TYPE(name, parent) case Type::name:
  161. #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
  162. #include "clang/AST/TypeNodes.def"
  163. llvm_unreachable("non-canonical or dependent type in IR-generation");
  164. case Type::Auto:
  165. llvm_unreachable("undeduced auto type in IR-generation");
  166. // Various scalar types.
  167. case Type::Builtin:
  168. case Type::Pointer:
  169. case Type::BlockPointer:
  170. case Type::LValueReference:
  171. case Type::RValueReference:
  172. case Type::MemberPointer:
  173. case Type::Vector:
  174. case Type::ExtVector:
  175. case Type::FunctionProto:
  176. case Type::FunctionNoProto:
  177. case Type::Enum:
  178. case Type::ObjCObjectPointer:
  179. case Type::Pipe:
  180. return TEK_Scalar;
  181. // Complexes.
  182. case Type::Complex:
  183. return TEK_Complex;
  184. // Arrays, records, and Objective-C objects.
  185. case Type::ConstantArray:
  186. case Type::IncompleteArray:
  187. case Type::VariableArray:
  188. case Type::Record:
  189. case Type::ObjCObject:
  190. case Type::ObjCInterface:
  191. return TEK_Aggregate;
  192. // We operate on atomic values according to their underlying type.
  193. case Type::Atomic:
  194. type = cast<AtomicType>(type)->getValueType();
  195. continue;
  196. }
  197. llvm_unreachable("unknown type kind!");
  198. }
  199. }
  200. llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  201. // For cleanliness, we try to avoid emitting the return block for
  202. // simple cases.
  203. llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  204. if (CurBB) {
  205. assert(!CurBB->getTerminator() && "Unexpected terminated block.");
  206. // We have a valid insert point, reuse it if it is empty or there are no
  207. // explicit jumps to the return block.
  208. if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
  209. ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
  210. delete ReturnBlock.getBlock();
  211. } else
  212. EmitBlock(ReturnBlock.getBlock());
  213. return llvm::DebugLoc();
  214. }
  215. // Otherwise, if the return block is the target of a single direct
  216. // branch then we can just put the code in that block instead. This
  217. // cleans up functions which started with a unified return block.
  218. if (ReturnBlock.getBlock()->hasOneUse()) {
  219. llvm::BranchInst *BI =
  220. dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
  221. if (BI && BI->isUnconditional() &&
  222. BI->getSuccessor(0) == ReturnBlock.getBlock()) {
  223. // Record/return the DebugLoc of the simple 'return' expression to be used
  224. // later by the actual 'ret' instruction.
  225. llvm::DebugLoc Loc = BI->getDebugLoc();
  226. Builder.SetInsertPoint(BI->getParent());
  227. BI->eraseFromParent();
  228. delete ReturnBlock.getBlock();
  229. return Loc;
  230. }
  231. }
  232. // FIXME: We are at an unreachable point, there is no reason to emit the block
  233. // unless it has uses. However, we still need a place to put the debug
  234. // region.end for now.
  235. EmitBlock(ReturnBlock.getBlock());
  236. return llvm::DebugLoc();
  237. }
  238. static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  239. if (!BB) return;
  240. if (!BB->use_empty())
  241. return CGF.CurFn->getBasicBlockList().push_back(BB);
  242. delete BB;
  243. }
  244. void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  245. assert(BreakContinueStack.empty() &&
  246. "mismatched push/pop in break/continue stack!");
  247. bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
  248. && NumSimpleReturnExprs == NumReturnExprs
  249. && ReturnBlock.getBlock()->use_empty();
  250. // Usually the return expression is evaluated before the cleanup
  251. // code. If the function contains only a simple return statement,
  252. // such as a constant, the location before the cleanup code becomes
  253. // the last useful breakpoint in the function, because the simple
  254. // return expression will be evaluated after the cleanup code. To be
  255. // safe, set the debug location for cleanup code to the location of
  256. // the return statement. Otherwise the cleanup code should be at the
  257. // end of the function's lexical scope.
  258. //
  259. // If there are multiple branches to the return block, the branch
  260. // instructions will get the location of the return statements and
  261. // all will be fine.
  262. if (CGDebugInfo *DI = getDebugInfo()) {
  263. if (OnlySimpleReturnStmts)
  264. DI->EmitLocation(Builder, LastStopPoint);
  265. else
  266. DI->EmitLocation(Builder, EndLoc);
  267. }
  268. // Pop any cleanups that might have been associated with the
  269. // parameters. Do this in whatever block we're currently in; it's
  270. // important to do this before we enter the return block or return
  271. // edges will be *really* confused.
  272. bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  273. bool HasOnlyLifetimeMarkers =
  274. HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  275. bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  276. if (HasCleanups) {
  277. // Make sure the line table doesn't jump back into the body for
  278. // the ret after it's been at EndLoc.
  279. if (CGDebugInfo *DI = getDebugInfo())
  280. if (OnlySimpleReturnStmts)
  281. DI->EmitLocation(Builder, EndLoc);
  282. PopCleanupBlocks(PrologueCleanupDepth);
  283. }
  284. // Emit function epilog (to return).
  285. llvm::DebugLoc Loc = EmitReturnBlock();
  286. if (ShouldInstrumentFunction())
  287. EmitFunctionInstrumentation("__cyg_profile_func_exit");
  288. // Emit debug descriptor for function end.
  289. if (CGDebugInfo *DI = getDebugInfo())
  290. DI->EmitFunctionEnd(Builder);
  291. // Reset the debug location to that of the simple 'return' expression, if any
  292. // rather than that of the end of the function's scope '}'.
  293. ApplyDebugLocation AL(*this, Loc);
  294. EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  295. EmitEndEHSpec(CurCodeDecl);
  296. assert(EHStack.empty() &&
  297. "did not remove all scopes from cleanup stack!");
  298. // If someone did an indirect goto, emit the indirect goto block at the end of
  299. // the function.
  300. if (IndirectBranch) {
  301. EmitBlock(IndirectBranch->getParent());
  302. Builder.ClearInsertionPoint();
  303. }
  304. // If some of our locals escaped, insert a call to llvm.localescape in the
  305. // entry block.
  306. if (!EscapedLocals.empty()) {
  307. // Invert the map from local to index into a simple vector. There should be
  308. // no holes.
  309. SmallVector<llvm::Value *, 4> EscapeArgs;
  310. EscapeArgs.resize(EscapedLocals.size());
  311. for (auto &Pair : EscapedLocals)
  312. EscapeArgs[Pair.second] = Pair.first;
  313. llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
  314. &CGM.getModule(), llvm::Intrinsic::localescape);
  315. CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  316. }
  317. // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  318. llvm::Instruction *Ptr = AllocaInsertPt;
  319. AllocaInsertPt = nullptr;
  320. Ptr->eraseFromParent();
  321. // If someone took the address of a label but never did an indirect goto, we
  322. // made a zero entry PHI node, which is illegal, zap it now.
  323. if (IndirectBranch) {
  324. llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
  325. if (PN->getNumIncomingValues() == 0) {
  326. PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
  327. PN->eraseFromParent();
  328. }
  329. }
  330. EmitIfUsed(*this, EHResumeBlock);
  331. EmitIfUsed(*this, TerminateLandingPad);
  332. EmitIfUsed(*this, TerminateHandler);
  333. EmitIfUsed(*this, UnreachableBlock);
  334. if (CGM.getCodeGenOpts().EmitDeclMetadata)
  335. EmitDeclMetadata();
  336. for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
  337. I = DeferredReplacements.begin(),
  338. E = DeferredReplacements.end();
  339. I != E; ++I) {
  340. I->first->replaceAllUsesWith(I->second);
  341. I->first->eraseFromParent();
  342. }
  343. }
  344. /// ShouldInstrumentFunction - Return true if the current function should be
  345. /// instrumented with __cyg_profile_func_* calls
  346. bool CodeGenFunction::ShouldInstrumentFunction() {
  347. if (!CGM.getCodeGenOpts().InstrumentFunctions)
  348. return false;
  349. if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
  350. return false;
  351. return true;
  352. }
  353. /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
  354. /// instrumentation function with the current function and the call site, if
  355. /// function instrumentation is enabled.
  356. void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  357. // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  358. llvm::PointerType *PointerTy = Int8PtrTy;
  359. llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  360. llvm::FunctionType *FunctionTy =
  361. llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
  362. llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  363. llvm::CallInst *CallSite = Builder.CreateCall(
  364. CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
  365. llvm::ConstantInt::get(Int32Ty, 0),
  366. "callsite");
  367. llvm::Value *args[] = {
  368. llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
  369. CallSite
  370. };
  371. EmitNounwindRuntimeCall(F, args);
  372. }
  373. void CodeGenFunction::EmitMCountInstrumentation() {
  374. llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  375. llvm::Constant *MCountFn =
  376. CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  377. EmitNounwindRuntimeCall(MCountFn);
  378. }
  379. // OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
  380. // information in the program executable. The argument information stored
  381. // includes the argument name, its type, the address and access qualifiers used.
  382. static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
  383. CodeGenModule &CGM, llvm::LLVMContext &Context,
  384. SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
  385. CGBuilderTy &Builder, ASTContext &ASTCtx) {
  386. // Create MDNodes that represent the kernel arg metadata.
  387. // Each MDNode is a list in the form of "key", N number of values which is
  388. // the same number of values as their are kernel arguments.
  389. const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();
  390. // MDNode for the kernel argument address space qualifiers.
  391. SmallVector<llvm::Metadata *, 8> addressQuals;
  392. addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));
  393. // MDNode for the kernel argument access qualifiers (images only).
  394. SmallVector<llvm::Metadata *, 8> accessQuals;
  395. accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));
  396. // MDNode for the kernel argument type names.
  397. SmallVector<llvm::Metadata *, 8> argTypeNames;
  398. argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));
  399. // MDNode for the kernel argument base type names.
  400. SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
  401. argBaseTypeNames.push_back(
  402. llvm::MDString::get(Context, "kernel_arg_base_type"));
  403. // MDNode for the kernel argument type qualifiers.
  404. SmallVector<llvm::Metadata *, 8> argTypeQuals;
  405. argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));
  406. // MDNode for the kernel argument names.
  407. SmallVector<llvm::Metadata *, 8> argNames;
  408. argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
  409. for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
  410. const ParmVarDecl *parm = FD->getParamDecl(i);
  411. QualType ty = parm->getType();
  412. std::string typeQuals;
  413. if (ty->isPointerType()) {
  414. QualType pointeeTy = ty->getPointeeType();
  415. // Get address qualifier.
  416. addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
  417. ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));
  418. // Get argument type name.
  419. std::string typeName =
  420. pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
  421. // Turn "unsigned type" to "utype"
  422. std::string::size_type pos = typeName.find("unsigned");
  423. if (pointeeTy.isCanonical() && pos != std::string::npos)
  424. typeName.erase(pos+1, 8);
  425. argTypeNames.push_back(llvm::MDString::get(Context, typeName));
  426. std::string baseTypeName =
  427. pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
  428. Policy) +
  429. "*";
  430. // Turn "unsigned type" to "utype"
  431. pos = baseTypeName.find("unsigned");
  432. if (pos != std::string::npos)
  433. baseTypeName.erase(pos+1, 8);
  434. argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
  435. // Get argument type qualifiers:
  436. if (ty.isRestrictQualified())
  437. typeQuals = "restrict";
  438. if (pointeeTy.isConstQualified() ||
  439. (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
  440. typeQuals += typeQuals.empty() ? "const" : " const";
  441. if (pointeeTy.isVolatileQualified())
  442. typeQuals += typeQuals.empty() ? "volatile" : " volatile";
  443. } else {
  444. uint32_t AddrSpc = 0;
  445. bool isPipe = ty->isPipeType();
  446. if (ty->isImageType() || isPipe)
  447. AddrSpc =
  448. CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
  449. addressQuals.push_back(
  450. llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));
  451. // Get argument type name.
  452. std::string typeName;
  453. if (isPipe)
  454. typeName = cast<PipeType>(ty)->getElementType().getAsString(Policy);
  455. else
  456. typeName = ty.getUnqualifiedType().getAsString(Policy);
  457. // Turn "unsigned type" to "utype"
  458. std::string::size_type pos = typeName.find("unsigned");
  459. if (ty.isCanonical() && pos != std::string::npos)
  460. typeName.erase(pos+1, 8);
  461. argTypeNames.push_back(llvm::MDString::get(Context, typeName));
  462. std::string baseTypeName;
  463. if (isPipe)
  464. baseTypeName =
  465. cast<PipeType>(ty)->getElementType().getCanonicalType().getAsString(Policy);
  466. else
  467. baseTypeName =
  468. ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
  469. // Turn "unsigned type" to "utype"
  470. pos = baseTypeName.find("unsigned");
  471. if (pos != std::string::npos)
  472. baseTypeName.erase(pos+1, 8);
  473. argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
  474. // Get argument type qualifiers:
  475. if (ty.isConstQualified())
  476. typeQuals = "const";
  477. if (ty.isVolatileQualified())
  478. typeQuals += typeQuals.empty() ? "volatile" : " volatile";
  479. if (isPipe)
  480. typeQuals = "pipe";
  481. }
  482. argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));
  483. // Get image and pipe access qualifier:
  484. // FIXME: now image and pipe share the same access qualifier maybe we can
  485. // refine it to OpenCL access qualifier and also handle write_read
  486. if (ty->isImageType()|| ty->isPipeType()) {
  487. const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
  488. if (A && A->isWriteOnly())
  489. accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
  490. else
  491. accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
  492. // FIXME: what about read_write?
  493. } else
  494. accessQuals.push_back(llvm::MDString::get(Context, "none"));
  495. // Get argument name.
  496. argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  497. }
  498. kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  499. kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  500. kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  501. kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
  502. kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  503. if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
  504. kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
  505. }
  506. void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
  507. llvm::Function *Fn)
  508. {
  509. if (!FD->hasAttr<OpenCLKernelAttr>())
  510. return;
  511. llvm::LLVMContext &Context = getLLVMContext();
  512. SmallVector<llvm::Metadata *, 5> kernelMDArgs;
  513. kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));
  514. GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
  515. getContext());
  516. if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
  517. QualType hintQTy = A->getTypeHint();
  518. const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
  519. bool isSignedInteger =
  520. hintQTy->isSignedIntegerType() ||
  521. (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
  522. llvm::Metadata *attrMDArgs[] = {
  523. llvm::MDString::get(Context, "vec_type_hint"),
  524. llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
  525. CGM.getTypes().ConvertType(A->getTypeHint()))),
  526. llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
  527. llvm::IntegerType::get(Context, 32),
  528. llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
  529. kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  530. }
  531. if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
  532. llvm::Metadata *attrMDArgs[] = {
  533. llvm::MDString::get(Context, "work_group_size_hint"),
  534. llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
  535. llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
  536. llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
  537. kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  538. }
  539. if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
  540. llvm::Metadata *attrMDArgs[] = {
  541. llvm::MDString::get(Context, "reqd_work_group_size"),
  542. llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
  543. llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
  544. llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
  545. kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  546. }
  547. llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  548. llvm::NamedMDNode *OpenCLKernelMetadata =
  549. CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  550. OpenCLKernelMetadata->addOperand(kernelMDNode);
  551. }
  552. /// Determine whether the function F ends with a return stmt.
  553. static bool endsWithReturn(const Decl* F) {
  554. const Stmt *Body = nullptr;
  555. if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
  556. Body = FD->getBody();
  557. else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
  558. Body = OMD->getBody();
  559. if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
  560. auto LastStmt = CS->body_rbegin();
  561. if (LastStmt != CS->body_rend())
  562. return isa<ReturnStmt>(*LastStmt);
  563. }
  564. return false;
  565. }
  566. void CodeGenFunction::StartFunction(GlobalDecl GD,
  567. QualType RetTy,
  568. llvm::Function *Fn,
  569. const CGFunctionInfo &FnInfo,
  570. const FunctionArgList &Args,
  571. SourceLocation Loc,
  572. SourceLocation StartLoc) {
  573. assert(!CurFn &&
  574. "Do not use a CodeGenFunction object for more than one function");
  575. const Decl *D = GD.getDecl();
  576. DidCallStackSave = false;
  577. CurCodeDecl = D;
  578. CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  579. FnRetTy = RetTy;
  580. CurFn = Fn;
  581. CurFnInfo = &FnInfo;
  582. assert(CurFn->isDeclaration() && "Function already has body?");
  583. if (CGM.isInSanitizerBlacklist(Fn, Loc))
  584. SanOpts.clear();
  585. if (D) {
  586. // Apply the no_sanitize* attributes to SanOpts.
  587. for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
  588. SanOpts.Mask &= ~Attr->getMask();
  589. }
  590. // Apply sanitizer attributes to the function.
  591. if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
  592. Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  593. if (SanOpts.has(SanitizerKind::Thread))
  594. Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  595. if (SanOpts.has(SanitizerKind::Memory))
  596. Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  597. if (SanOpts.has(SanitizerKind::SafeStack))
  598. Fn->addFnAttr(llvm::Attribute::SafeStack);
  599. // Pass inline keyword to optimizer if it appears explicitly on any
  600. // declaration. Also, in the case of -fno-inline attach NoInline
  601. // attribute to all function that are not marked AlwaysInline.
  602. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
  603. if (!CGM.getCodeGenOpts().NoInline) {
  604. for (auto RI : FD->redecls())
  605. if (RI->isInlineSpecified()) {
  606. Fn->addFnAttr(llvm::Attribute::InlineHint);
  607. break;
  608. }
  609. } else if (!FD->hasAttr<AlwaysInlineAttr>())
  610. Fn->addFnAttr(llvm::Attribute::NoInline);
  611. }
  612. if (getLangOpts().OpenCL) {
  613. // Add metadata for a kernel function.
  614. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
  615. EmitOpenCLKernelMetadata(FD, Fn);
  616. }
  617. // If we are checking function types, emit a function type signature as
  618. // prologue data.
  619. if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
  620. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
  621. if (llvm::Constant *PrologueSig =
  622. CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
  623. llvm::Constant *FTRTTIConst =
  624. CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
  625. llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
  626. llvm::Constant *PrologueStructConst =
  627. llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
  628. Fn->setPrologueData(PrologueStructConst);
  629. }
  630. }
  631. }
  632. // If we're in C++ mode and the function name is "main", it is guaranteed
  633. // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  634. // used within a program").
  635. if (getLangOpts().CPlusPlus)
  636. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
  637. if (FD->isMain())
  638. Fn->addFnAttr(llvm::Attribute::NoRecurse);
  639. llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
  640. // Create a marker to make it easy to insert allocas into the entryblock
  641. // later. Don't create this with the builder, because we don't want it
  642. // folded.
  643. llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  644. AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  645. if (Builder.isNamePreserving())
  646. AllocaInsertPt->setName("allocapt");
  647. ReturnBlock = getJumpDestInCurrentScope("return");
  648. Builder.SetInsertPoint(EntryBB);
  649. // Emit subprogram debug descriptor.
  650. if (CGDebugInfo *DI = getDebugInfo()) {
  651. SmallVector<QualType, 16> ArgTypes;
  652. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  653. i != e; ++i) {
  654. ArgTypes.push_back((*i)->getType());
  655. }
  656. QualType FnType =
  657. getContext().getFunctionType(RetTy, ArgTypes,
  658. FunctionProtoType::ExtProtoInfo());
  659. DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  660. }
  661. if (ShouldInstrumentFunction())
  662. EmitFunctionInstrumentation("__cyg_profile_func_enter");
  663. if (CGM.getCodeGenOpts().InstrumentForProfiling)
  664. EmitMCountInstrumentation();
  665. if (RetTy->isVoidType()) {
  666. // Void type; nothing to return.
  667. ReturnValue = Address::invalid();
  668. // Count the implicit return.
  669. if (!endsWithReturn(D))
  670. ++NumReturnExprs;
  671. } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
  672. !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
  673. // Indirect aggregate return; emit returned value directly into sret slot.
  674. // This reduces code size, and affects correctness in C++.
  675. auto AI = CurFn->arg_begin();
  676. if (CurFnInfo->getReturnInfo().isSRetAfterThis())
  677. ++AI;
  678. ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  679. } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
  680. !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
  681. // Load the sret pointer from the argument struct and return into that.
  682. unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
  683. llvm::Function::arg_iterator EI = CurFn->arg_end();
  684. --EI;
  685. llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
  686. Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
  687. ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  688. } else {
  689. ReturnValue = CreateIRTemp(RetTy, "retval");
  690. // Tell the epilog emitter to autorelease the result. We do this
  691. // now so that various specialized functions can suppress it
  692. // during their IR-generation.
  693. if (getLangOpts().ObjCAutoRefCount &&
  694. !CurFnInfo->isReturnsRetained() &&
  695. RetTy->isObjCRetainableType())
  696. AutoreleaseResult = true;
  697. }
  698. EmitStartEHSpec(CurCodeDecl);
  699. PrologueCleanupDepth = EHStack.stable_begin();
  700. EmitFunctionProlog(*CurFnInfo, CurFn, Args);
  701. if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
  702. CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  703. const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
  704. if (MD->getParent()->isLambda() &&
  705. MD->getOverloadedOperator() == OO_Call) {
  706. // We're in a lambda; figure out the captures.
  707. MD->getParent()->getCaptureFields(LambdaCaptureFields,
  708. LambdaThisCaptureField);
  709. if (LambdaThisCaptureField) {
  710. // If this lambda captures this, load it.
  711. LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
  712. CXXThisValue = EmitLoadOfLValue(ThisLValue,
  713. SourceLocation()).getScalarVal();
  714. }
  715. for (auto *FD : MD->getParent()->fields()) {
  716. if (FD->hasCapturedVLAType()) {
  717. auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
  718. SourceLocation()).getScalarVal();
  719. auto VAT = FD->getCapturedVLAType();
  720. VLASizeMap[VAT->getSizeExpr()] = ExprArg;
  721. }
  722. }
  723. } else {
  724. // Not in a lambda; just use 'this' from the method.
  725. // FIXME: Should we generate a new load for each use of 'this'? The
  726. // fast register allocator would be happier...
  727. CXXThisValue = CXXABIThisValue;
  728. }
  729. }
  730. // If any of the arguments have a variably modified type, make sure to
  731. // emit the type size.
  732. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  733. i != e; ++i) {
  734. const VarDecl *VD = *i;
  735. // Dig out the type as written from ParmVarDecls; it's unclear whether
  736. // the standard (C99 6.9.1p10) requires this, but we're following the
  737. // precedent set by gcc.
  738. QualType Ty;
  739. if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
  740. Ty = PVD->getOriginalType();
  741. else
  742. Ty = VD->getType();
  743. if (Ty->isVariablyModifiedType())
  744. EmitVariablyModifiedType(Ty);
  745. }
  746. // Emit a location at the end of the prologue.
  747. if (CGDebugInfo *DI = getDebugInfo())
  748. DI->EmitLocation(Builder, StartLoc);
  749. }
  750. void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
  751. const Stmt *Body) {
  752. incrementProfileCounter(Body);
  753. if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
  754. EmitCompoundStmtWithoutScope(*S);
  755. else
  756. EmitStmt(Body);
  757. }
  758. /// When instrumenting to collect profile data, the counts for some blocks
  759. /// such as switch cases need to not include the fall-through counts, so
  760. /// emit a branch around the instrumentation code. When not instrumenting,
  761. /// this just calls EmitBlock().
  762. void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
  763. const Stmt *S) {
  764. llvm::BasicBlock *SkipCountBB = nullptr;
  765. if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
  766. // When instrumenting for profiling, the fallthrough to certain
  767. // statements needs to skip over the instrumentation code so that we
  768. // get an accurate count.
  769. SkipCountBB = createBasicBlock("skipcount");
  770. EmitBranch(SkipCountBB);
  771. }
  772. EmitBlock(BB);
  773. uint64_t CurrentCount = getCurrentProfileCount();
  774. incrementProfileCounter(S);
  775. setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  776. if (SkipCountBB)
  777. EmitBlock(SkipCountBB);
  778. }
  779. /// Tries to mark the given function nounwind based on the
  780. /// non-existence of any throwing calls within it. We believe this is
  781. /// lightweight enough to do at -O0.
  782. static void TryMarkNoThrow(llvm::Function *F) {
  783. // LLVM treats 'nounwind' on a function as part of the type, so we
  784. // can't do this on functions that can be overwritten.
  785. if (F->mayBeOverridden()) return;
  786. for (llvm::BasicBlock &BB : *F)
  787. for (llvm::Instruction &I : BB)
  788. if (I.mayThrow())
  789. return;
  790. F->setDoesNotThrow();
  791. }
  792. void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
  793. const CGFunctionInfo &FnInfo) {
  794. const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  795. // Check if we should generate debug info for this function.
  796. if (FD->hasAttr<NoDebugAttr>())
  797. DebugInfo = nullptr; // disable debug info indefinitely for this function
  798. FunctionArgList Args;
  799. QualType ResTy = FD->getReturnType();
  800. CurGD = GD;
  801. const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  802. if (MD && MD->isInstance()) {
  803. if (CGM.getCXXABI().HasThisReturn(GD))
  804. ResTy = MD->getThisType(getContext());
  805. else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
  806. ResTy = CGM.getContext().VoidPtrTy;
  807. CGM.getCXXABI().buildThisParam(*this, Args);
  808. }
  809. for (auto *Param : FD->params()) {
  810. Args.push_back(Param);
  811. if (!Param->hasAttr<PassObjectSizeAttr>())
  812. continue;
  813. IdentifierInfo *NoID = nullptr;
  814. auto *Implicit = ImplicitParamDecl::Create(
  815. getContext(), Param->getDeclContext(), Param->getLocation(), NoID,
  816. getContext().getSizeType());
  817. SizeArguments[Param] = Implicit;
  818. Args.push_back(Implicit);
  819. }
  820. if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
  821. CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
  822. SourceRange BodyRange;
  823. if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  824. CurEHLocation = BodyRange.getEnd();
  825. // Use the location of the start of the function to determine where
  826. // the function definition is located. By default use the location
  827. // of the declaration as the location for the subprogram. A function
  828. // may lack a declaration in the source code if it is created by code
  829. // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  830. SourceLocation Loc = FD->getLocation();
  831. // If this is a function specialization then use the pattern body
  832. // as the location for the function.
  833. if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
  834. if (SpecDecl->hasBody(SpecDecl))
  835. Loc = SpecDecl->getLocation();
  836. // Emit the standard function prologue.
  837. StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
  838. // Generate the body of the function.
  839. PGO.assignRegionCounters(GD, CurFn);
  840. if (isa<CXXDestructorDecl>(FD))
  841. EmitDestructorBody(Args);
  842. else if (isa<CXXConstructorDecl>(FD))
  843. EmitConstructorBody(Args);
  844. else if (getLangOpts().CUDA &&
  845. !getLangOpts().CUDAIsDevice &&
  846. FD->hasAttr<CUDAGlobalAttr>())
  847. CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  848. else if (isa<CXXConversionDecl>(FD) &&
  849. cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
  850. // The lambda conversion to block pointer is special; the semantics can't be
  851. // expressed in the AST, so IRGen needs to special-case it.
  852. EmitLambdaToBlockPointerBody(Args);
  853. } else if (isa<CXXMethodDecl>(FD) &&
  854. cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
  855. // The lambda static invoker function is special, because it forwards or
  856. // clones the body of the function call operator (but is actually static).
  857. EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  858. } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
  859. (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
  860. cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
  861. // Implicit copy-assignment gets the same special treatment as implicit
  862. // copy-constructors.
  863. emitImplicitAssignmentOperatorBody(Args);
  864. } else if (Stmt *Body = FD->getBody()) {
  865. EmitFunctionBody(Args, Body);
  866. } else
  867. llvm_unreachable("no definition for emitted function");
  868. // C++11 [stmt.return]p2:
  869. // Flowing off the end of a function [...] results in undefined behavior in
  870. // a value-returning function.
  871. // C11 6.9.1p12:
  872. // If the '}' that terminates a function is reached, and the value of the
  873. // function call is used by the caller, the behavior is undefined.
  874. if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
  875. !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
  876. if (SanOpts.has(SanitizerKind::Return)) {
  877. SanitizerScope SanScope(this);
  878. llvm::Value *IsFalse = Builder.getFalse();
  879. EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
  880. "missing_return", EmitCheckSourceLocation(FD->getLocation()),
  881. None);
  882. } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
  883. EmitTrapCall(llvm::Intrinsic::trap);
  884. }
  885. Builder.CreateUnreachable();
  886. Builder.ClearInsertionPoint();
  887. }
  888. // Emit the standard function epilogue.
  889. FinishFunction(BodyRange.getEnd());
  890. // If we haven't marked the function nothrow through other means, do
  891. // a quick pass now to see if we can.
  892. if (!CurFn->doesNotThrow())
  893. TryMarkNoThrow(CurFn);
  894. }
  895. /// ContainsLabel - Return true if the statement contains a label in it. If
  896. /// this statement is not executed normally, it not containing a label means
  897. /// that we can just remove the code.
  898. bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  899. // Null statement, not a label!
  900. if (!S) return false;
  901. // If this is a label, we have to emit the code, consider something like:
  902. // if (0) { ... foo: bar(); } goto foo;
  903. //
  904. // TODO: If anyone cared, we could track __label__'s, since we know that you
  905. // can't jump to one from outside their declared region.
  906. if (isa<LabelStmt>(S))
  907. return true;
  908. // If this is a case/default statement, and we haven't seen a switch, we have
  909. // to emit the code.
  910. if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
  911. return true;
  912. // If this is a switch statement, we want to ignore cases below it.
  913. if (isa<SwitchStmt>(S))
  914. IgnoreCaseStmts = true;
  915. // Scan subexpressions for verboten labels.
  916. for (const Stmt *SubStmt : S->children())
  917. if (ContainsLabel(SubStmt, IgnoreCaseStmts))
  918. return true;
  919. return false;
  920. }
  921. /// containsBreak - Return true if the statement contains a break out of it.
  922. /// If the statement (recursively) contains a switch or loop with a break
  923. /// inside of it, this is fine.
  924. bool CodeGenFunction::containsBreak(const Stmt *S) {
  925. // Null statement, not a label!
  926. if (!S) return false;
  927. // If this is a switch or loop that defines its own break scope, then we can
  928. // include it and anything inside of it.
  929. if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
  930. isa<ForStmt>(S))
  931. return false;
  932. if (isa<BreakStmt>(S))
  933. return true;
  934. // Scan subexpressions for verboten breaks.
  935. for (const Stmt *SubStmt : S->children())
  936. if (containsBreak(SubStmt))
  937. return true;
  938. return false;
  939. }
  940. /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  941. /// to a constant, or if it does but contains a label, return false. If it
  942. /// constant folds return true and set the boolean result in Result.
  943. bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
  944. bool &ResultBool) {
  945. llvm::APSInt ResultInt;
  946. if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
  947. return false;
  948. ResultBool = ResultInt.getBoolValue();
  949. return true;
  950. }
  951. /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  952. /// to a constant, or if it does but contains a label, return false. If it
  953. /// constant folds return true and set the folded value.
  954. bool CodeGenFunction::
  955. ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  956. // FIXME: Rename and handle conversion of other evaluatable things
  957. // to bool.
  958. llvm::APSInt Int;
  959. if (!Cond->EvaluateAsInt(Int, getContext()))
  960. return false; // Not foldable, not integer or not fully evaluatable.
  961. if (CodeGenFunction::ContainsLabel(Cond))
  962. return false; // Contains a label.
  963. ResultInt = Int;
  964. return true;
  965. }
  966. /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
  967. /// statement) to the specified blocks. Based on the condition, this might try
  968. /// to simplify the codegen of the conditional based on the branch.
  969. ///
  970. void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
  971. llvm::BasicBlock *TrueBlock,
  972. llvm::BasicBlock *FalseBlock,
  973. uint64_t TrueCount) {
  974. Cond = Cond->IgnoreParens();
  975. if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
  976. // Handle X && Y in a condition.
  977. if (CondBOp->getOpcode() == BO_LAnd) {
  978. // If we have "1 && X", simplify the code. "0 && X" would have constant
  979. // folded if the case was simple enough.
  980. bool ConstantBool = false;
  981. if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
  982. ConstantBool) {
  983. // br(1 && X) -> br(X).
  984. incrementProfileCounter(CondBOp);
  985. return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
  986. TrueCount);
  987. }
  988. // If we have "X && 1", simplify the code to use an uncond branch.
  989. // "X && 0" would have been constant folded to 0.
  990. if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
  991. ConstantBool) {
  992. // br(X && 1) -> br(X).
  993. return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
  994. TrueCount);
  995. }
  996. // Emit the LHS as a conditional. If the LHS conditional is false, we
  997. // want to jump to the FalseBlock.
  998. llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
  999. // The counter tells us how often we evaluate RHS, and all of TrueCount
  1000. // can be propagated to that branch.
  1001. uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
  1002. ConditionalEvaluation eval(*this);
  1003. {
  1004. ApplyDebugLocation DL(*this, Cond);
  1005. EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
  1006. EmitBlock(LHSTrue);
  1007. }
  1008. incrementProfileCounter(CondBOp);
  1009. setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
  1010. // Any temporaries created here are conditional.
  1011. eval.begin(*this);
  1012. EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
  1013. eval.end(*this);
  1014. return;
  1015. }
  1016. if (CondBOp->getOpcode() == BO_LOr) {
  1017. // If we have "0 || X", simplify the code. "1 || X" would have constant
  1018. // folded if the case was simple enough.
  1019. bool ConstantBool = false;
  1020. if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
  1021. !ConstantBool) {
  1022. // br(0 || X) -> br(X).
  1023. incrementProfileCounter(CondBOp);
  1024. return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
  1025. TrueCount);
  1026. }
  1027. // If we have "X || 0", simplify the code to use an uncond branch.
  1028. // "X || 1" would have been constant folded to 1.
  1029. if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
  1030. !ConstantBool) {
  1031. // br(X || 0) -> br(X).
  1032. return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
  1033. TrueCount);
  1034. }
  1035. // Emit the LHS as a conditional. If the LHS conditional is true, we
  1036. // want to jump to the TrueBlock.
  1037. llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
  1038. // We have the count for entry to the RHS and for the whole expression
  1039. // being true, so we can divy up True count between the short circuit and
  1040. // the RHS.
  1041. uint64_t LHSCount =
  1042. getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
  1043. uint64_t RHSCount = TrueCount - LHSCount;
  1044. ConditionalEvaluation eval(*this);
  1045. {
  1046. ApplyDebugLocation DL(*this, Cond);
  1047. EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
  1048. EmitBlock(LHSFalse);
  1049. }
  1050. incrementProfileCounter(CondBOp);
  1051. setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
  1052. // Any temporaries created here are conditional.
  1053. eval.begin(*this);
  1054. EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
  1055. eval.end(*this);
  1056. return;
  1057. }
  1058. }
  1059. if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
  1060. // br(!x, t, f) -> br(x, f, t)
  1061. if (CondUOp->getOpcode() == UO_LNot) {
  1062. // Negate the count.
  1063. uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
  1064. // Negate the condition and swap the destination blocks.
  1065. return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
  1066. FalseCount);
  1067. }
  1068. }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }
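
  // Illustrative note (not from the original source): for `if (c ? x : y)`
  // the branch on `c` selects "cond.true" or "cond.false", and each of those
  // blocks then branches on `x` or `y` directly to the final destinations, so
  // the value of the ternary is never materialized. TrueCount is split
  // between the two arms in proportion to how often `c` was observed true.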

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  llvm::MDNode *Unpredictable = nullptr;
  if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
    if (const CallExpr *Call = dyn_cast<CallExpr>(Cond)) {
      const Decl *TargetDecl = Call->getCalleeDecl();
      if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
        if (FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
          llvm::MDBuilder MDHelper(getLLVMContext());
          Unpredictable = MDHelper.createUnpredictable();
        }
      }
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}
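
// Illustrative note (not from the original source): in the fully general case
// a condition such as `if (__builtin_unpredictable(x > 0))` is evaluated to a
// single i1 and emitted as one conditional branch carrying the PGO-derived
// branch weights and, when optimizing, the "unpredictable" metadata created
// above.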

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}
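
// Illustrative note (not from the original source): this path is used when a
// VLA must be "zero"-initialized but the null value of its elements is not an
// all-zero bit pattern, e.g. an array of pointers to data members, whose null
// representation is -1 (see the TODO in EmitNullInitialization below). The
// loop above copies the single-element bit pattern into each element in turn:
//   vla-init.loop: vla.cur  = phi [vla.begin, entry], [vla.next, loop]
//                  memcpy(vla.cur, src, baseSizeInChars)
//                  vla.next = vla.cur + baseSizeInChars
//                  br (vla.next == vla.end), vla-init.cont, vla-init.loop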

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}
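
// Illustrative note (not from the original source): for a trivially
// zero-initializable type such as `struct S { int a; char *p; };` this emits
// a plain memset of sizeof(S) zero bytes. If S instead contained a pointer to
// data member (whose null value is not all-zero bits), the code above
// materializes a private constant global holding the proper null pattern and
// memcpy's it into place, or splats it element-by-element via
// emitNonZeroVLAInit when the destination is a VLA.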

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}
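
// Illustrative note (not from the original source; the names `f`, `tgt`, and
// `done` are hypothetical): these two functions back the GNU "labels as
// values" extension, e.g.
//   void *tgt = &&done;   // GetAddrOfLabel -> blockaddress(@f, %done)
//   goto *tgt;            // funneled through the shared "indirectgoto" block
// Every address-taken label is registered as a destination of the single
// indirectbr, and indirect gotos feed the "indirect.goto.dest" PHI, keeping
// the CFG conservatively correct.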

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}
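
// Illustrative note (not from the original source): for `int a[3][5]` this
// returns the constant 15, sets baseType to `int`, and rewrites addr with a
// GEP of indices [0, 0, 0] into the [3 x [5 x i32]] object ("array.begin").
// For `int a[n][m][4]` it returns n*m*4, i.e. the VLA element count times the
// constant dimensions, with the factors combined via NUW multiplies.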

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}
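
// Illustrative note (not from the original source): for a declaration like
// `int a[n][m]`, the previously computed sizes of `n` and `m` are looked up
// in VLASizeMap and combined with an NUW multiply, so the result is the pair
// (n*m, int): the total element count across all VLA dimensions together with
// the innermost non-VLA element type.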

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      "vla_bound_not_positive", StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
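
// Illustrative note (not from the original source): for
//   void f(int n) { int a[n]; /* ... */ }
// this walk reaches the VariableArrayType of `a`, evaluates the size
// expression `n` once, and caches the size_t-extended value in VLASizeMap
// keyed on the size expression, so later queries (getVLASize, sizeof,
// indexing) reuse the same value rather than re-evaluating `n`. With
// -fsanitize=vla-bound, a check that the bound is positive is emitted first.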

Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert (Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}
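
// Illustrative note (not from the original source): the "protection" is just
// an identity bitcast inserted after a zext'd scalar. While that extra use is
// live, IR gen's trunc(zext x) -> x peephole cannot fold the zext away;
// unprotectFromPeepholes then simply erases the bitcast once the value is no
// longer at risk.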

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}
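
// Illustrative note (not from the original source; the variable name and
// annotation string are hypothetical): a declaration such as
//   int counter __attribute__((annotate("my_tag")));
// is lowered to a call to the llvm.var.annotation intrinsic whose arguments,
// built in EmitAnnotationCall above, are the annotated pointer bitcast to
// i8*, the annotation string, the emitting file/unit string, and the line
// number of the declaration.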

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
                                                              InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, FD);

  // Every required feature must be present in the caller's feature map; an
  // entry of the form "a|b" is satisfied when at least one of the
  // alternatives is enabled. Any feature found to be missing is recorded in
  // FirstMissing for use in diagnostics.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, "|");
        return std::any_of(OrFeatures.begin(), OrFeatures.end(),
                           [&](StringRef Feature) {
                             if (!CallerFeatureMap.lookup(Feature)) {
                               FirstMissing = Feature.str();
                               return false;
                             }
                             return true;
                           });
      });
}
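
// Illustrative note (not from the original source; the feature names are a
// made-up example): a required-features string such as "sse4.1,sse4.2|popcnt"
// is split on ',' by the caller into individual requirements, and each
// requirement may in turn be a '|'-separated list of alternatives, so this
// example would require the caller to have sse4.1 enabled and at least one of
// sse4.2 or popcnt.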

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu, for an always_inline function this is any
  // listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ",");
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
  } else if (TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.
    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}