ShadowStackGC.cpp
  1. //===-- ShadowStackGC.cpp - GC support for uncooperative targets ----------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file implements lowering for the llvm.gc* intrinsics for targets that do
  11. // not natively support them (which includes the C backend). Note that the code
  12. // generated is not quite as efficient as algorithms which generate stack maps
  13. // to identify roots.
  14. //
  15. // This pass implements the code transformation described in this paper:
  16. // "Accurate Garbage Collection in an Uncooperative Environment"
  17. // Fergus Henderson, ISMM, 2002
  18. //
  19. // In runtime/GC/SemiSpace.cpp is a prototype runtime which is compatible with
  20. // ShadowStackGC.
  21. //
  22. // In order to support this particular transformation, all stack roots are
  23. // coallocated in the stack. This allows a fully target-independent stack map
  24. // while introducing only minor runtime overhead.
  25. //
  26. //===----------------------------------------------------------------------===//
  27. #define DEBUG_TYPE "shadowstackgc"
  28. #include "llvm/IRBuilder.h"
  29. #include "llvm/IntrinsicInst.h"
  30. #include "llvm/Module.h"
  31. #include "llvm/ADT/StringExtras.h"
  32. #include "llvm/CodeGen/GCStrategy.h"
  33. #include "llvm/CodeGen/GCs.h"
  34. #include "llvm/Support/CallSite.h"
  35. using namespace llvm;
namespace {

/// ShadowStackGC - A GCStrategy that lowers the llvm.gc* intrinsics into an
/// explicit, target-independent shadow stack: a singly linked list of frame
/// records containing the function's GC roots, maintained at runtime.
class ShadowStackGC : public GCStrategy {
  /// RootChain - This is the global linked-list that contains the chain of GC
  /// roots.
  GlobalVariable *Head;

  /// StackEntryTy - Abstract type of a link in the shadow stack.
  ///
  StructType *StackEntryTy;

  // FrameMapTy - Type of the per-function constant frame descriptor; created
  // in initializeCustomLowering() ({ i32 NumRoots, i32 NumMeta }).
  StructType *FrameMapTy;

  /// Roots - GC roots in the current function. Each is a pair of the
  /// intrinsic call and its corresponding alloca.
  std::vector<std::pair<CallInst*,AllocaInst*> > Roots;

public:
  ShadowStackGC();

  bool initializeCustomLowering(Module &M);
  bool performCustomLowering(Function &F);

private:
  bool IsNullValue(Value *V);
  Constant *GetFrameMap(Function &F);
  Type* GetConcreteStackEntryType(Function &F);
  void CollectRoots(Function &F);

  // CreateGEP - Helpers that emit a GEP into the frame aggregate; the
  // two-index form addresses a field, the three-index form a field's field.
  static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                      IRBuilder<> &B, Value *BasePtr,
                                      int Idx1, const char *Name);
  static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                      IRBuilder<> &B, Value *BasePtr,
                                      int Idx1, int Idx2, const char *Name);
};

}
// Register this strategy so functions can select it by name ("shadow-stack").
static GCRegistry::Add<ShadowStackGC>
X("shadow-stack", "Very portable GC for uncooperative code generators");
namespace {

/// EscapeEnumerator - This is a little algorithm to find all escape points
/// from a function so that "finally"-style code can be inserted. In addition
/// to finding the existing return and unwind instructions, it also (if
/// necessary) transforms any call instructions into invokes and sends them to
/// a landing pad.
///
/// It's wrapped up in a state machine using the same transform C# uses for
/// 'yield return' enumerators, This transform allows it to be non-allocating.
class EscapeEnumerator {
  Function &F;                       // Function being enumerated.
  const char *CleanupBBName;         // Name for the synthesized cleanup block.

  // State.
  // 0 = not started; 1 = yielding returns/resumes; 2 = yielded the cleanup
  // landing pad (or nothing left); each Next() call resumes where we left off.
  int State;
  Function::iterator StateBB, StateE;
  IRBuilder<> Builder;

public:
  EscapeEnumerator(Function &F, const char *N = "cleanup")
    : F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}

  /// Next - Yield an IRBuilder positioned immediately before the next escape
  /// point, or null when there are no more. The final yield (if any) is
  /// positioned before the resume in a cleanup landing pad that all calls in
  /// the function have been rewritten to unwind to.
  IRBuilder<> *Next() {
    switch (State) {
    default:
      return 0;

    case 0:
      StateBB = F.begin();
      StateE = F.end();
      State = 1;
      // FALL THROUGH into the scan below.

    case 1:
      // Find all 'return', 'resume', and 'unwind' instructions.
      while (StateBB != StateE) {
        BasicBlock *CurBB = StateBB++;

        // Branches and invokes do not escape, only unwind, resume, and return
        // do.
        TerminatorInst *TI = CurBB->getTerminator();
        if (!isa<ReturnInst>(TI) && !isa<ResumeInst>(TI))
          continue;

        Builder.SetInsertPoint(TI->getParent(), TI);
        return &Builder;
      }

      State = 2;

      // Find all 'call' instructions. Intrinsic calls are skipped: they do
      // not unwind, so they need no cleanup edge.
      SmallVector<Instruction*,16> Calls;
      for (Function::iterator BB = F.begin(),
                              E = F.end(); BB != E; ++BB)
        for (BasicBlock::iterator II = BB->begin(),
                                  EE = BB->end(); II != EE; ++II)
          if (CallInst *CI = dyn_cast<CallInst>(II))
            if (!CI->getCalledFunction() ||
                !CI->getCalledFunction()->getIntrinsicID())
              Calls.push_back(CI);

      if (Calls.empty())
        return 0;

      // Create a cleanup block. The landing pad's type is the usual
      // { i8* exception, i32 selector } pair used by the GCC EH personality.
      LLVMContext &C = F.getContext();
      BasicBlock *CleanupBB = BasicBlock::Create(C, CleanupBBName, &F);
      Type *ExnTy = StructType::get(Type::getInt8PtrTy(C),
                                    Type::getInt32Ty(C), NULL);
      Constant *PersFn =
        F.getParent()->
        getOrInsertFunction("__gcc_personality_v0",
                            FunctionType::get(Type::getInt32Ty(C), true));
      LandingPadInst *LPad = LandingPadInst::Create(ExnTy, PersFn, 1,
                                                    "cleanup.lpad",
                                                    CleanupBB);
      LPad->setCleanup(true);
      ResumeInst *RI = ResumeInst::Create(LPad, CleanupBB);

      // Transform the 'call' instructions into 'invoke's branching to the
      // cleanup block. Go in reverse order to make prettier BB names.
      SmallVector<Value*,16> Args;
      for (unsigned I = Calls.size(); I != 0; ) {
        CallInst *CI = cast<CallInst>(Calls[--I]);

        // Split the basic block containing the function call.
        BasicBlock *CallBB = CI->getParent();
        BasicBlock *NewBB =
          CallBB->splitBasicBlock(CI, CallBB->getName() + ".cont");

        // Remove the unconditional branch inserted at the end of CallBB.
        CallBB->getInstList().pop_back();
        NewBB->getInstList().remove(CI);

        // Create a new invoke instruction.
        Args.clear();
        CallSite CS(CI);
        Args.append(CS.arg_begin(), CS.arg_end());

        InvokeInst *II = InvokeInst::Create(CI->getCalledValue(),
                                            NewBB, CleanupBB,
                                            Args, CI->getName(), CallBB);
        II->setCallingConv(CI->getCallingConv());
        II->setAttributes(CI->getAttributes());
        CI->replaceAllUsesWith(II);
        // CI was already unlinked from its block above, so plain delete (not
        // eraseFromParent) is the correct way to destroy it.
        delete CI;
      }

      Builder.SetInsertPoint(RI->getParent(), RI);
      return &Builder;
    }
  }
};

}
// -----------------------------------------------------------------------------

// linkShadowStackGC - Dummy hook referenced from outside this file to force
// this object file (and its static GCRegistry registration) to be linked in.
void llvm::linkShadowStackGC() { }
ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
  // GCStrategy flags: request null-initialization of roots and routing
  // through performCustomLowering (rather than the default lowering).
  InitRoots = true;
  CustomRoots = true;
}
  169. Constant *ShadowStackGC::GetFrameMap(Function &F) {
  170. // doInitialization creates the abstract type of this value.
  171. Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
  172. // Truncate the ShadowStackDescriptor if some metadata is null.
  173. unsigned NumMeta = 0;
  174. SmallVector<Constant*, 16> Metadata;
  175. for (unsigned I = 0; I != Roots.size(); ++I) {
  176. Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
  177. if (!C->isNullValue())
  178. NumMeta = I + 1;
  179. Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
  180. }
  181. Metadata.resize(NumMeta);
  182. Type *Int32Ty = Type::getInt32Ty(F.getContext());
  183. Constant *BaseElts[] = {
  184. ConstantInt::get(Int32Ty, Roots.size(), false),
  185. ConstantInt::get(Int32Ty, NumMeta, false),
  186. };
  187. Constant *DescriptorElts[] = {
  188. ConstantStruct::get(FrameMapTy, BaseElts),
  189. ConstantArray::get(ArrayType::get(VoidPtr, NumMeta), Metadata)
  190. };
  191. Type *EltTys[] = { DescriptorElts[0]->getType(),DescriptorElts[1]->getType()};
  192. StructType *STy = StructType::create(EltTys, "gc_map."+utostr(NumMeta));
  193. Constant *FrameMap = ConstantStruct::get(STy, DescriptorElts);
  194. // FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
  195. // that, short of multithreaded LLVM, it should be safe; all that is
  196. // necessary is that a simple Module::iterator loop not be invalidated.
  197. // Appending to the GlobalVariable list is safe in that sense.
  198. //
  199. // All of the output passes emit globals last. The ExecutionEngine
  200. // explicitly supports adding globals to the module after
  201. // initialization.
  202. //
  203. // Still, if it isn't deemed acceptable, then this transformation needs
  204. // to be a ModulePass (which means it cannot be in the 'llc' pipeline
  205. // (which uses a FunctionPassManager (which segfaults (not asserts) if
  206. // provided a ModulePass))).
  207. Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
  208. GlobalVariable::InternalLinkage,
  209. FrameMap, "__gc_" + F.getName());
  210. Constant *GEPIndices[2] = {
  211. ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
  212. ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)
  213. };
  214. return ConstantExpr::getGetElementPtr(GV, GEPIndices);
  215. }
  216. Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
  217. // doInitialization creates the generic version of this type.
  218. std::vector<Type*> EltTys;
  219. EltTys.push_back(StackEntryTy);
  220. for (size_t I = 0; I != Roots.size(); I++)
  221. EltTys.push_back(Roots[I].second->getAllocatedType());
  222. return StructType::create(EltTys, "gc_stackentry."+F.getName().str());
  223. }
  224. /// doInitialization - If this module uses the GC intrinsics, find them now. If
  225. /// not, exit fast.
  226. bool ShadowStackGC::initializeCustomLowering(Module &M) {
  227. // struct FrameMap {
  228. // int32_t NumRoots; // Number of roots in stack frame.
  229. // int32_t NumMeta; // Number of metadata descriptors. May be < NumRoots.
  230. // void *Meta[]; // May be absent for roots without metadata.
  231. // };
  232. std::vector<Type*> EltTys;
  233. // 32 bits is ok up to a 32GB stack frame. :)
  234. EltTys.push_back(Type::getInt32Ty(M.getContext()));
  235. // Specifies length of variable length array.
  236. EltTys.push_back(Type::getInt32Ty(M.getContext()));
  237. FrameMapTy = StructType::create(EltTys, "gc_map");
  238. PointerType *FrameMapPtrTy = PointerType::getUnqual(FrameMapTy);
  239. // struct StackEntry {
  240. // ShadowStackEntry *Next; // Caller's stack entry.
  241. // FrameMap *Map; // Pointer to constant FrameMap.
  242. // void *Roots[]; // Stack roots (in-place array, so we pretend).
  243. // };
  244. StackEntryTy = StructType::create(M.getContext(), "gc_stackentry");
  245. EltTys.clear();
  246. EltTys.push_back(PointerType::getUnqual(StackEntryTy));
  247. EltTys.push_back(FrameMapPtrTy);
  248. StackEntryTy->setBody(EltTys);
  249. PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
  250. // Get the root chain if it already exists.
  251. Head = M.getGlobalVariable("llvm_gc_root_chain");
  252. if (!Head) {
  253. // If the root chain does not exist, insert a new one with linkonce
  254. // linkage!
  255. Head = new GlobalVariable(M, StackEntryPtrTy, false,
  256. GlobalValue::LinkOnceAnyLinkage,
  257. Constant::getNullValue(StackEntryPtrTy),
  258. "llvm_gc_root_chain");
  259. } else if (Head->hasExternalLinkage() && Head->isDeclaration()) {
  260. Head->setInitializer(Constant::getNullValue(StackEntryPtrTy));
  261. Head->setLinkage(GlobalValue::LinkOnceAnyLinkage);
  262. }
  263. return true;
  264. }
  265. bool ShadowStackGC::IsNullValue(Value *V) {
  266. if (Constant *C = dyn_cast<Constant>(V))
  267. return C->isNullValue();
  268. return false;
  269. }
  270. void ShadowStackGC::CollectRoots(Function &F) {
  271. // FIXME: Account for original alignment. Could fragment the root array.
  272. // Approach 1: Null initialize empty slots at runtime. Yuck.
  273. // Approach 2: Emit a map of the array instead of just a count.
  274. assert(Roots.empty() && "Not cleaned up?");
  275. SmallVector<std::pair<CallInst*, AllocaInst*>, 16> MetaRoots;
  276. for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
  277. for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
  278. if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
  279. if (Function *F = CI->getCalledFunction())
  280. if (F->getIntrinsicID() == Intrinsic::gcroot) {
  281. std::pair<CallInst*, AllocaInst*> Pair = std::make_pair(
  282. CI, cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
  283. if (IsNullValue(CI->getArgOperand(1)))
  284. Roots.push_back(Pair);
  285. else
  286. MetaRoots.push_back(Pair);
  287. }
  288. // Number roots with metadata (usually empty) at the beginning, so that the
  289. // FrameMap::Meta array can be elided.
  290. Roots.insert(Roots.begin(), MetaRoots.begin(), MetaRoots.end());
  291. }
  292. GetElementPtrInst *
  293. ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
  294. int Idx, int Idx2, const char *Name) {
  295. Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
  296. ConstantInt::get(Type::getInt32Ty(Context), Idx),
  297. ConstantInt::get(Type::getInt32Ty(Context), Idx2) };
  298. Value* Val = B.CreateGEP(BasePtr, Indices, Name);
  299. assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
  300. return dyn_cast<GetElementPtrInst>(Val);
  301. }
  302. GetElementPtrInst *
  303. ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
  304. int Idx, const char *Name) {
  305. Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
  306. ConstantInt::get(Type::getInt32Ty(Context), Idx) };
  307. Value *Val = B.CreateGEP(BasePtr, Indices, Name);
  308. assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
  309. return dyn_cast<GetElementPtrInst>(Val);
  310. }
/// runOnFunction - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
  LLVMContext &Context = F.getContext();

  // Find calls to llvm.gcroot.
  CollectRoots(F);

  // If there are no roots in this function, then there is no need to add a
  // stack map entry for it.
  if (Roots.empty())
    return false;

  // Build the constant map and figure the type of the shadow stack entry.
  Value *FrameMap = GetFrameMap(F);
  Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

  // Build the shadow stack entry at the very start of the function.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  IRBuilder<> AtEntry(IP->getParent(), IP);

  Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
                                                 "gc_frame");

  // Skip past any other entry-block allocas before emitting the frame setup.
  while (isa<AllocaInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Initialize the map pointer and load the current head of the shadow stack.
  Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
  Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, StackEntry,
                                       0,1,"gc_frame.map");
  AtEntry.CreateStore(FrameMap, EntryMapPtr);

  // After all the allocas...
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    // For each root, find the corresponding slot in the aggregate... Field 0
    // of the concrete frame is the generic header, so root I lives at 1 + I
    // (see GetConcreteStackEntryType).
    Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");

    // And use it in lieu of the alloca.
    AllocaInst *OriginalAlloca = Roots[I].second;
    SlotPtr->takeName(OriginalAlloca);
    OriginalAlloca->replaceAllUsesWith(SlotPtr);
  }

  // Move past the original stores inserted by GCStrategy::InitRoots. This isn't
  // really necessary (the collector would never see the intermediate state at
  // runtime), but it's nicer not to push the half-initialized entry onto the
  // shadow stack.
  while (isa<StoreInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Push the entry onto the shadow stack: entry->next = head; head = entry.
  Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
                                        StackEntry,0,0,"gc_frame.next");
  Instruction *NewHeadVal = CreateGEP(Context, AtEntry,
                                      StackEntry, 0, "gc_newhead");
  AtEntry.CreateStore(CurrentHead, EntryNextPtr);
  AtEntry.CreateStore(NewHeadVal, Head);

  // For each instruction that escapes...
  EscapeEnumerator EE(F, "gc_cleanup");
  while (IRBuilder<> *AtExit = EE.Next()) {
    // Pop the entry from the shadow stack. Don't reuse CurrentHead from
    // AtEntry, since that would make the value live for the entire function.
    Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
                                           "gc_frame.next");
    Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
    AtExit->CreateStore(SavedHead, Head);
  }

  // Delete the original allocas (which are no longer used) and the intrinsic
  // calls (which are no longer valid). Doing this last avoids invalidating
  // iterators.
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    Roots[I].first->eraseFromParent();
    Roots[I].second->eraseFromParent();
  }

  Roots.clear();
  return true;
}