// ShadowStackGC.cpp
//===-- ShadowStackGC.cpp - GC support for uncooperative targets ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering for the llvm.gc* intrinsics for targets that do
// not natively support them (which includes the C backend). Note that the code
// generated is not quite as efficient as algorithms which generate stack maps
// to identify roots.
//
// This pass implements the code transformation described in this paper:
//   "Accurate Garbage Collection in an Uncooperative Environment"
//   Fergus Henderson, ISMM, 2002
//
// In runtime/GC/SemiSpace.cpp is a prototype runtime which is compatible with
// ShadowStackGC.
//
// In order to support this particular transformation, all stack roots are
// coallocated in the stack. This allows a fully target-independent stack map
// while introducing only minor runtime overhead.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "shadowstackgc"
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;
namespace {

  /// ShadowStackGC - A GCStrategy that lowers the llvm.gc* intrinsics into an
  /// explicit, target-independent linked list of stack frames (the "shadow
  /// stack") threaded through a global head pointer, so a collector can find
  /// roots without compiler-generated stack maps.
  class ShadowStackGC : public GCStrategy {
    /// RootChain - This is the global linked-list that contains the chain of GC
    /// roots.
    GlobalVariable *Head;

    /// StackEntryTy - Abstract type of a link in the shadow stack.
    ///
    const StructType *StackEntryTy;

    /// Roots - GC roots in the current function. Each is a pair of the
    /// intrinsic call and its corresponding alloca.
    std::vector<std::pair<CallInst*,AllocaInst*> > Roots;

  public:
    ShadowStackGC();

    bool initializeCustomLowering(Module &M);
    bool performCustomLowering(Function &F);

  private:
    bool IsNullValue(Value *V);
    Constant *GetFrameMap(Function &F);
    const Type* GetConcreteStackEntryType(Function &F);
    void CollectRoots(Function &F);

    // Helpers that emit a constant-index GEP (one or two indices beyond the
    // leading zero) and assert that it was not constant-folded away.
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, const char *Name);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, int Idx2, const char *Name);
  };

}

// Register the strategy so it can be selected by name ("shadow-stack").
static GCRegistry::Add<ShadowStackGC>
X("shadow-stack", "Very portable GC for uncooperative code generators");
namespace {
  /// EscapeEnumerator - This is a little algorithm to find all escape points
  /// from a function so that "finally"-style code can be inserted. In addition
  /// to finding the existing return and unwind instructions, it also (if
  /// necessary) transforms any call instructions into invokes and sends them to
  /// a landing pad.
  ///
  /// It's wrapped up in a state machine using the same transform C# uses for
  /// 'yield return' enumerators, This transform allows it to be non-allocating.
  class EscapeEnumerator {
    Function &F;
    const char *CleanupBBName;

    // State.
    int State;
    Function::iterator StateBB, StateE;
    IRBuilder<> Builder;

  public:
    EscapeEnumerator(Function &F, const char *N = "cleanup")
      : F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}

    /// Next - Return a builder positioned at the next escape point, or null
    /// when every escape point has been visited. Each call resumes where the
    /// previous one left off.
    IRBuilder<> *Next() {
      switch (State) {
      default:
        return 0;

      case 0:
        StateBB = F.begin();
        StateE = F.end();
        State = 1;
        // Deliberate fall-through into the scanning state.

      case 1:
        // Find all 'return' and 'unwind' instructions.
        while (StateBB != StateE) {
          BasicBlock *CurBB = StateBB++;

          // Branches and invokes do not escape, only unwind and return do.
          TerminatorInst *TI = CurBB->getTerminator();
          if (!isa<UnwindInst>(TI) && !isa<ReturnInst>(TI))
            continue;

          Builder.SetInsertPoint(TI->getParent(), TI);
          return &Builder;
        }

        State = 2;

        // Find all 'call' instructions (skipping intrinsics, which never
        // unwind; indirect calls have no callee and are kept).
        SmallVector<Instruction*,16> Calls;
        for (Function::iterator BB = F.begin(),
                                E = F.end(); BB != E; ++BB)
          for (BasicBlock::iterator II = BB->begin(),
                                    EE = BB->end(); II != EE; ++II)
            if (CallInst *CI = dyn_cast<CallInst>(II))
              if (!CI->getCalledFunction() ||
                  !CI->getCalledFunction()->getIntrinsicID())
                Calls.push_back(CI);

        if (Calls.empty())
          return 0;

        // Create a cleanup block.
        BasicBlock *CleanupBB = BasicBlock::Create(F.getContext(),
                                                   CleanupBBName, &F);
        UnwindInst *UI = new UnwindInst(F.getContext(), CleanupBB);

        // Transform the 'call' instructions into 'invoke's branching to the
        // cleanup block. Go in reverse order to make prettier BB names.
        SmallVector<Value*,16> Args;
        for (unsigned I = Calls.size(); I != 0; ) {
          CallInst *CI = cast<CallInst>(Calls[--I]);

          // Split the basic block containing the function call.
          BasicBlock *CallBB = CI->getParent();
          BasicBlock *NewBB =
            CallBB->splitBasicBlock(CI, CallBB->getName() + ".cont");

          // Remove the unconditional branch inserted at the end of CallBB.
          CallBB->getInstList().pop_back();
          NewBB->getInstList().remove(CI);

          // Create a new invoke instruction.
          Args.clear();
          CallSite CS(CI);
          Args.append(CS.arg_begin(), CS.arg_end());

          InvokeInst *II = InvokeInst::Create(CI->getCalledValue(),
                                              NewBB, CleanupBB,
                                              Args.begin(), Args.end(),
                                              CI->getName(), CallBB);
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());
          CI->replaceAllUsesWith(II);

          // CI was manually unlinked from its block above, so plain delete is
          // the correct disposal here (eraseFromParent would require it to
          // still be linked into a block).
          delete CI;
        }

        // Yield a builder positioned at the cleanup block's unwind.
        Builder.SetInsertPoint(UI->getParent(), UI);
        return &Builder;
      }
    }
  };
}
// -----------------------------------------------------------------------------

/// linkShadowStackGC - No-op entry point. Calling it from client code forces
/// this object file (and hence the static GCRegistry registration above) to be
/// pulled in by the linker.
void llvm::linkShadowStackGC() { }
  154. ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
  155. InitRoots = true;
  156. CustomRoots = true;
  157. }
/// GetFrameMap - Build the constant FrameMap descriptor for F from the roots
/// gathered by CollectRoots: { {NumRoots, NumMeta}, void*[NumMeta] }. Returns
/// a GEP constant pointing at the descriptor's first field.
Constant *ShadowStackGC::GetFrameMap(Function &F) {
  // doInitialization creates the abstract type of this value.
  const Type *VoidPtr = Type::getInt8PtrTy(F.getContext());

  // Truncate the ShadowStackDescriptor if some metadata is null.
  // NumMeta ends up as one past the last root with non-null metadata, so a
  // trailing run of null metadata entries is simply dropped from the array.
  unsigned NumMeta = 0;
  SmallVector<Constant*, 16> Metadata;
  for (unsigned I = 0; I != Roots.size(); ++I) {
    Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
    if (!C->isNullValue())
      NumMeta = I + 1;
    Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
  }

  const Type *Int32Ty = Type::getInt32Ty(F.getContext());

  Constant *BaseElts[] = {
    ConstantInt::get(Int32Ty, Roots.size(), false),
    ConstantInt::get(Int32Ty, NumMeta, false),
  };

  Constant *DescriptorElts[] = {
    ConstantStruct::get(StructType::get(Int32Ty, Int32Ty, NULL), BaseElts),
    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta),
                       Metadata.begin(), NumMeta)
  };

  Constant *FrameMap =
    ConstantStruct::get(StructType::get(DescriptorElts[0]->getType(),
                                        DescriptorElts[1]->getType(), NULL),
                        DescriptorElts);

  // Name the type for readability of the emitted IR, e.g. "gc_map.2".
  std::string TypeName("gc_map.");
  TypeName += utostr(NumMeta);
  F.getParent()->addTypeName(TypeName, FrameMap->getType());

  // FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
  //        that, short of multithreaded LLVM, it should be safe; all that is
  //        necessary is that a simple Module::iterator loop not be invalidated.
  //        Appending to the GlobalVariable list is safe in that sense.
  //
  //        All of the output passes emit globals last. The ExecutionEngine
  //        explicitly supports adding globals to the module after
  //        initialization.
  //
  //        Still, if it isn't deemed acceptable, then this transformation needs
  //        to be a ModulePass (which means it cannot be in the 'llc' pipeline
  //        (which uses a FunctionPassManager (which segfaults (not asserts) if
  //        provided a ModulePass))).
  Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
                                    GlobalVariable::InternalLinkage,
                                    FrameMap, "__gc_" + F.getName());

  // Hand back &GV->field0 rather than GV itself so callers get a pointer of
  // the inner descriptor type.
  Constant *GEPIndices[2] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)
  };
  return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}
  209. const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
  210. // doInitialization creates the generic version of this type.
  211. std::vector<const Type*> EltTys;
  212. EltTys.push_back(StackEntryTy);
  213. for (size_t I = 0; I != Roots.size(); I++)
  214. EltTys.push_back(Roots[I].second->getAllocatedType());
  215. Type *Ty = StructType::get(F.getContext(), EltTys);
  216. std::string TypeName("gc_stackentry.");
  217. TypeName += F.getName();
  218. F.getParent()->addTypeName(TypeName, Ty);
  219. return Ty;
  220. }
/// doInitialization - If this module uses the GC intrinsics, find them now. If
/// not, exit fast.
bool ShadowStackGC::initializeCustomLowering(Module &M) {
  // struct FrameMap {
  //   int32_t NumRoots; // Number of roots in stack frame.
  //   int32_t NumMeta;  // Number of metadata descriptors. May be < NumRoots.
  //   void *Meta[];     // May be absent for roots without metadata.
  // };
  std::vector<const Type*> EltTys;
  // 32 bits is ok up to a 32GB stack frame. :)
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  // Specifies length of variable length array.
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  StructType *FrameMapTy = StructType::get(M.getContext(), EltTys);
  M.addTypeName("gc_map", FrameMapTy);
  PointerType *FrameMapPtrTy = PointerType::getUnqual(FrameMapTy);

  // struct StackEntry {
  //   ShadowStackEntry *Next; // Caller's stack entry.
  //   FrameMap *Map;          // Pointer to constant FrameMap.
  //   void *Roots[];          // Stack roots (in-place array, so we pretend).
  // };
  // StackEntry is self-referential, so build it through an OpaqueType
  // placeholder that is refined to the finished struct afterwards. The
  // PATypeHolder keeps the struct alive across that refinement.
  OpaqueType *RecursiveTy = OpaqueType::get(M.getContext());

  EltTys.clear();
  EltTys.push_back(PointerType::getUnqual(RecursiveTy));
  EltTys.push_back(FrameMapPtrTy);
  PATypeHolder LinkTyH = StructType::get(M.getContext(), EltTys);

  RecursiveTy->refineAbstractTypeTo(LinkTyH.get());
  StackEntryTy = cast<StructType>(LinkTyH.get());
  const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
  M.addTypeName("gc_stackentry", LinkTyH.get());  // FIXME: Is this safe from
                                                  //        a FunctionPass?

  // Get the root chain if it already exists.
  Head = M.getGlobalVariable("llvm_gc_root_chain");
  if (!Head) {
    // If the root chain does not exist, insert a new one with linkonce
    // linkage!
    Head = new GlobalVariable(M, StackEntryPtrTy, false,
                              GlobalValue::LinkOnceAnyLinkage,
                              Constant::getNullValue(StackEntryPtrTy),
                              "llvm_gc_root_chain");
  } else if (Head->hasExternalLinkage() && Head->isDeclaration()) {
    // The chain was declared (but not defined) elsewhere: give it a null
    // definition here and weaken the linkage so duplicate definitions from
    // other translation units merge at link time.
    Head->setInitializer(Constant::getNullValue(StackEntryPtrTy));
    Head->setLinkage(GlobalValue::LinkOnceAnyLinkage);
  }

  return true;
}
  267. bool ShadowStackGC::IsNullValue(Value *V) {
  268. if (Constant *C = dyn_cast<Constant>(V))
  269. return C->isNullValue();
  270. return false;
  271. }
  272. void ShadowStackGC::CollectRoots(Function &F) {
  273. // FIXME: Account for original alignment. Could fragment the root array.
  274. // Approach 1: Null initialize empty slots at runtime. Yuck.
  275. // Approach 2: Emit a map of the array instead of just a count.
  276. assert(Roots.empty() && "Not cleaned up?");
  277. SmallVector<std::pair<CallInst*, AllocaInst*>, 16> MetaRoots;
  278. for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
  279. for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
  280. if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
  281. if (Function *F = CI->getCalledFunction())
  282. if (F->getIntrinsicID() == Intrinsic::gcroot) {
  283. std::pair<CallInst*, AllocaInst*> Pair = std::make_pair(
  284. CI, cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
  285. if (IsNullValue(CI->getArgOperand(1)))
  286. Roots.push_back(Pair);
  287. else
  288. MetaRoots.push_back(Pair);
  289. }
  290. // Number roots with metadata (usually empty) at the beginning, so that the
  291. // FrameMap::Meta array can be elided.
  292. Roots.insert(Roots.begin(), MetaRoots.begin(), MetaRoots.end());
  293. }
  294. GetElementPtrInst *
  295. ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
  296. int Idx, int Idx2, const char *Name) {
  297. Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
  298. ConstantInt::get(Type::getInt32Ty(Context), Idx),
  299. ConstantInt::get(Type::getInt32Ty(Context), Idx2) };
  300. Value* Val = B.CreateGEP(BasePtr, Indices, Indices + 3, Name);
  301. assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
  302. return dyn_cast<GetElementPtrInst>(Val);
  303. }
  304. GetElementPtrInst *
  305. ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
  306. int Idx, const char *Name) {
  307. Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
  308. ConstantInt::get(Type::getInt32Ty(Context), Idx) };
  309. Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 2, Name);
  310. assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
  311. return dyn_cast<GetElementPtrInst>(Val);
  312. }
/// runOnFunction - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
  LLVMContext &Context = F.getContext();

  // Find calls to llvm.gcroot.
  CollectRoots(F);

  // If there are no roots in this function, then there is no need to add a
  // stack map entry for it.
  if (Roots.empty())
    return false;

  // Build the constant map and figure the type of the shadow stack entry.
  Value *FrameMap = GetFrameMap(F);
  const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

  // Build the shadow stack entry at the very start of the function.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  IRBuilder<> AtEntry(IP->getParent(), IP);

  Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
                                                 "gc_frame");

  // Skip past the remaining entry-block allocas before emitting any other
  // setup code, so all allocas stay grouped at the top of the function.
  while (isa<AllocaInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Initialize the map pointer and load the current head of the shadow stack.
  Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
  Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, StackEntry,
                                       0,1,"gc_frame.map");
  AtEntry.CreateStore(FrameMap, EntryMapPtr);

  // After all the allocas...
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    // For each root, find the corresponding slot in the aggregate...
    // (field 0 is the link header, so root I lives at field 1 + I).
    Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");

    // And use it in lieu of the alloca.
    AllocaInst *OriginalAlloca = Roots[I].second;
    SlotPtr->takeName(OriginalAlloca);
    OriginalAlloca->replaceAllUsesWith(SlotPtr);
  }

  // Move past the original stores inserted by GCStrategy::InitRoots. This isn't
  // really necessary (the collector would never see the intermediate state at
  // runtime), but it's nicer not to push the half-initialized entry onto the
  // shadow stack.
  while (isa<StoreInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Push the entry onto the shadow stack.
  Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
                                        StackEntry,0,0,"gc_frame.next");
  Instruction *NewHeadVal = CreateGEP(Context, AtEntry,
                                      StackEntry, 0, "gc_newhead");
  AtEntry.CreateStore(CurrentHead, EntryNextPtr);
  AtEntry.CreateStore(NewHeadVal, Head);

  // For each instruction that escapes...
  EscapeEnumerator EE(F, "gc_cleanup");
  while (IRBuilder<> *AtExit = EE.Next()) {
    // Pop the entry from the shadow stack. Don't reuse CurrentHead from
    // AtEntry, since that would make the value live for the entire function.
    Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
                                           "gc_frame.next");
    Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
    AtExit->CreateStore(SavedHead, Head);
  }

  // Delete the original allocas (which are no longer used) and the intrinsic
  // calls (which are no longer valid). Doing this last avoids invalidating
  // iterators.
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    Roots[I].first->eraseFromParent();
    Roots[I].second->eraseFromParent();
  }

  Roots.clear();
  return true;
}