//===-- ShadowStackGC.cpp - GC support for uncooperative targets ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering for the llvm.gc* intrinsics for targets that
// do not natively support them (which includes the C backend). Note that the
// code generated is not quite as efficient as algorithms which generate stack
// maps to identify roots.
//
// This pass implements the code transformation described in this paper:
//   "Accurate Garbage Collection in an Uncooperative Environment"
//   Fergus Henderson, ISMM, 2002
//
// A prototype runtime compatible with ShadowStackGC can be found in
// runtime/GC/SemiSpace.cpp.
//
// In order to support this particular transformation, all stack roots are
// co-allocated in the stack. This allows a fully target-independent stack map
// while introducing only minor runtime overhead.
//
//===----------------------------------------------------------------------===//
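//
// In broad strokes, the pass rewrites each function that declares GC roots
// into something like the following C sketch (names are illustrative; the
// actual transformation operates on LLVM IR):
//
//   void foo() {
//     struct { StackEntry Entry; Object *Root0; } gc_frame;
//     gc_frame.Entry.Next = llvm_gc_root_chain;   // link into the chain
//     gc_frame.Entry.Map  = &__gc_foo;            // constant frame map
//     llvm_gc_root_chain  = &gc_frame.Entry;      // push this frame
//     ...                                         // roots live in gc_frame
//     llvm_gc_root_chain  = gc_frame.Entry.Next;  // pop on every exit
//   }
//
//===----------------------------------------------------------------------===//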
#define DEBUG_TYPE "shadowstackgc"
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;
namespace {

  class VISIBILITY_HIDDEN ShadowStackGC : public GCStrategy {
    /// Head - The head of the global linked list that contains the chain of
    /// GC roots.
    GlobalVariable *Head;

    /// StackEntryTy - Abstract type of a link in the shadow stack.
    const StructType *StackEntryTy;

    /// Roots - GC roots in the current function. Each is a pair of the
    /// intrinsic call and its corresponding alloca.
    std::vector<std::pair<CallInst*,AllocaInst*> > Roots;

  public:
    ShadowStackGC();

    bool initializeCustomLowering(Module &M);
    bool performCustomLowering(Function &F);

  private:
    bool IsNullValue(Value *V);
    Constant *GetFrameMap(Function &F);
    const Type* GetConcreteStackEntryType(Function &F);
    void CollectRoots(Function &F);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, const char *Name);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, int Idx2, const char *Name);
  };

}
static GCRegistry::Add<ShadowStackGC>
X("shadow-stack", "Very portable GC for uncooperative code generators");
namespace {

  /// EscapeEnumerator - This is a little algorithm to find all escape points
  /// from a function so that "finally"-style code can be inserted. In addition
  /// to finding the existing return and unwind instructions, it also (if
  /// necessary) transforms any call instructions into invokes and sends them
  /// to a landing pad.
  ///
  /// It's wrapped up in a state machine using the same transform C# uses for
  /// 'yield return' enumerators. This transform allows it to be non-allocating.
  class VISIBILITY_HIDDEN EscapeEnumerator {
    Function &F;
    const char *CleanupBBName;

    // State.
    int State;
    Function::iterator StateBB, StateE;
    IRBuilder<> Builder;

  public:
    EscapeEnumerator(Function &F, const char *N = "cleanup")
      : F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}

    IRBuilder<> *Next() {
      switch (State) {
      default:
        return 0;

      case 0:
        StateBB = F.begin();
        StateE = F.end();
        State = 1;
        // FALLTHROUGH

      case 1:
        // Find all 'return' and 'unwind' instructions.
        while (StateBB != StateE) {
          BasicBlock *CurBB = StateBB++;

          // Branches and invokes do not escape, only unwind and return do.
          TerminatorInst *TI = CurBB->getTerminator();
          if (!isa<UnwindInst>(TI) && !isa<ReturnInst>(TI))
            continue;

          Builder.SetInsertPoint(TI->getParent(), TI);
          return &Builder;
        }

        State = 2;

        // Find all 'call' instructions.
        SmallVector<Instruction*,16> Calls;
        for (Function::iterator BB = F.begin(),
                                E = F.end(); BB != E; ++BB)
          for (BasicBlock::iterator II = BB->begin(),
                                    EE = BB->end(); II != EE; ++II)
            if (CallInst *CI = dyn_cast<CallInst>(II))
              if (!CI->getCalledFunction() ||
                  !CI->getCalledFunction()->getIntrinsicID())
                Calls.push_back(CI);

        if (Calls.empty())
          return 0;

        // Create a cleanup block.
        BasicBlock *CleanupBB = BasicBlock::Create(F.getContext(),
                                                   CleanupBBName, &F);
        UnwindInst *UI = new UnwindInst(F.getContext(), CleanupBB);

        // Transform the 'call' instructions into 'invoke's branching to the
        // cleanup block. Go in reverse order to make prettier BB names.
        SmallVector<Value*,16> Args;
        for (unsigned I = Calls.size(); I != 0; ) {
          CallInst *CI = cast<CallInst>(Calls[--I]);

          // Split the basic block containing the function call.
          BasicBlock *CallBB = CI->getParent();
          BasicBlock *NewBB =
            CallBB->splitBasicBlock(CI, CallBB->getName() + ".cont");

          // Remove the unconditional branch inserted at the end of CallBB.
          CallBB->getInstList().pop_back();
          NewBB->getInstList().remove(CI);

          // Create a new invoke instruction. Operand 0 of the call is the
          // callee; the actual arguments start at operand 1.
          Args.clear();
          Args.append(CI->op_begin() + 1, CI->op_end());

          InvokeInst *II = InvokeInst::Create(CI->getOperand(0),
                                              NewBB, CleanupBB,
                                              Args.begin(), Args.end(),
                                              CI->getName(), CallBB);
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());
          CI->replaceAllUsesWith(II);
          delete CI;
        }

        Builder.SetInsertPoint(UI->getParent(), UI);
        return &Builder;
      }
    }
  };

}
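
// A sketch of how this enumerator is driven (see performCustomLowering below
// for the real use):
//
//   EscapeEnumerator EE(F, "gc_cleanup");
//   while (IRBuilder<> *AtExit = EE.Next()) {
//     // AtExit is positioned immediately before one escape point;
//     // emit the "finally"-style cleanup code here.
//   }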
// -----------------------------------------------------------------------------

void llvm::linkShadowStackGC() { }

ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
  InitRoots = true;    // Have GCStrategy null-initialize each root at entry.
  CustomRoots = true;  // Lower llvm.gcroot ourselves, in performCustomLowering.
}
Constant *ShadowStackGC::GetFrameMap(Function &F) {
  // initializeCustomLowering creates the abstract type of this value.
  Type *VoidPtr = PointerType::getUnqual(Type::getInt8Ty(F.getContext()));

  // Truncate the ShadowStackDescriptor if some metadata is null.
  unsigned NumMeta = 0;
  SmallVector<Constant*,16> Metadata;
  for (unsigned I = 0; I != Roots.size(); ++I) {
    Constant *C = cast<Constant>(Roots[I].first->getOperand(2));
    if (!C->isNullValue())
      NumMeta = I + 1;
    Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
  }

  Constant *BaseElts[] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), Roots.size(), false),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), NumMeta, false),
  };

  Constant *DescriptorElts[] = {
    ConstantStruct::get(F.getContext(), BaseElts, 2),
    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta),
                       Metadata.begin(), NumMeta)
  };

  Constant *FrameMap = ConstantStruct::get(F.getContext(), DescriptorElts, 2);

  std::string TypeName("gc_map.");
  TypeName += utostr(NumMeta);
  F.getParent()->addTypeName(TypeName, FrameMap->getType());

  // FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
  //        that, short of multithreaded LLVM, it should be safe; all that is
  //        necessary is that a simple Module::iterator loop not be invalidated.
  //        Appending to the GlobalVariable list is safe in that sense.
  //
  //        All of the output passes emit globals last. The ExecutionEngine
  //        explicitly supports adding globals to the module after
  //        initialization.
  //
  //        Still, if it isn't deemed acceptable, then this transformation needs
  //        to be a ModulePass (which means it cannot be in the 'llc' pipeline
  //        (which uses a FunctionPassManager (which segfaults (not asserts) if
  //        provided a ModulePass))).
  Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
                                    GlobalVariable::InternalLinkage,
                                    FrameMap, "__gc_" + F.getName());

  Constant *GEPIndices[2] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)
  };
  return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}
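
// As an illustration, a function %f with three roots where only the first
// carries metadata would yield a constant shaped roughly like this sketch
// (@meta stands for whatever metadata constant the front end supplied):
//
//   @__gc_f = internal constant { { i32, i32 }, [1 x i8*] }
//             { { i32, i32 } { i32 3, i32 1 }, [1 x i8*] [i8* @meta] }
//
// NumMeta is 1 rather than 3 because trailing null metadata entries are
// truncated away.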
const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
  // initializeCustomLowering creates the generic version of this type.
  std::vector<const Type*> EltTys;
  EltTys.push_back(StackEntryTy);
  for (size_t I = 0; I != Roots.size(); I++)
    EltTys.push_back(Roots[I].second->getAllocatedType());
  Type *Ty = StructType::get(F.getContext(), EltTys);

  std::string TypeName("gc_stackentry.");
  TypeName += F.getName();
  F.getParent()->addTypeName(TypeName, Ty);

  return Ty;
}
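
// For example, a function %f with two roots of types i8* and %Object* gets
// (illustrative):
//
//   %gc_stackentry.f = type { %gc_stackentry, i8*, %Object* }
//
// where %gc_stackentry is the generic { %gc_stackentry*, %gc_map* } link type
// built in initializeCustomLowering below.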
/// initializeCustomLowering - Create the module-level types and the global
/// root-chain head that the lowering needs.
bool ShadowStackGC::initializeCustomLowering(Module &M) {
  // struct FrameMap {
  //   int32_t NumRoots; // Number of roots in stack frame.
  //   int32_t NumMeta;  // Number of metadata descriptors. May be < NumRoots.
  //   void *Meta[];     // May be absent for roots without metadata.
  // };
  std::vector<const Type*> EltTys;
  // 32 bits is ok up to a 32GB stack frame. :)
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  // Specifies length of variable length array.
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  StructType *FrameMapTy = StructType::get(M.getContext(), EltTys);
  M.addTypeName("gc_map", FrameMapTy);
  PointerType *FrameMapPtrTy = PointerType::getUnqual(FrameMapTy);

  // struct StackEntry {
  //   ShadowStackEntry *Next; // Caller's stack entry.
  //   FrameMap *Map;          // Pointer to constant FrameMap.
  //   void *Roots[];          // Stack roots (in-place array, so we pretend).
  // };
  OpaqueType *RecursiveTy = OpaqueType::get(M.getContext());

  EltTys.clear();
  EltTys.push_back(PointerType::getUnqual(RecursiveTy));
  EltTys.push_back(FrameMapPtrTy);
  PATypeHolder LinkTyH = StructType::get(M.getContext(), EltTys);

  RecursiveTy->refineAbstractTypeTo(LinkTyH.get());
  StackEntryTy = cast<StructType>(LinkTyH.get());
  const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
  M.addTypeName("gc_stackentry", LinkTyH.get());  // FIXME: Is this safe from
                                                  //        a FunctionPass?

  // Get the root chain if it already exists.
  Head = M.getGlobalVariable("llvm_gc_root_chain");
  if (!Head) {
    // If the root chain does not exist, insert a new one with linkonce
    // linkage!
    Head = new GlobalVariable(M, StackEntryPtrTy, false,
                              GlobalValue::LinkOnceAnyLinkage,
                              Constant::getNullValue(StackEntryPtrTy),
                              "llvm_gc_root_chain");
  } else if (Head->hasExternalLinkage() && Head->isDeclaration()) {
    Head->setInitializer(Constant::getNullValue(StackEntryPtrTy));
    Head->setLinkage(GlobalValue::LinkOnceAnyLinkage);
  }

  return true;
}
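
// A collector runtime can walk this structure with a loop like the following
// sketch (compare runtime/GC/SemiSpace.cpp; the Visitor signature here is
// illustrative):
//
//   void visitGCRoots(void (*Visitor)(void **Root, const void *Meta)) {
//     for (StackEntry *R = llvm_gc_root_chain; R; R = R->Next) {
//       unsigned i = 0;
//       // Roots with metadata come first (see CollectRoots below).
//       for (unsigned e = R->Map->NumMeta; i != e; ++i)
//         Visitor(&R->Roots[i], R->Map->Meta[i]);
//       // The remaining roots have no metadata.
//       for (unsigned e = R->Map->NumRoots; i != e; ++i)
//         Visitor(&R->Roots[i], NULL);
//     }
//   }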
bool ShadowStackGC::IsNullValue(Value *V) {
  if (Constant *C = dyn_cast<Constant>(V))
    return C->isNullValue();
  return false;
}
void ShadowStackGC::CollectRoots(Function &F) {
  // FIXME: Account for original alignment. Could fragment the root array.
  //   Approach 1: Null initialize empty slots at runtime. Yuck.
  //   Approach 2: Emit a map of the array instead of just a count.
  assert(Roots.empty() && "Not cleaned up?");

  SmallVector<std::pair<CallInst*,AllocaInst*>,16> MetaRoots;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::gcroot) {
            std::pair<CallInst*,AllocaInst*> Pair = std::make_pair(
              CI, cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts()));
            if (IsNullValue(CI->getOperand(2)))
              Roots.push_back(Pair);
            else
              MetaRoots.push_back(Pair);
          }

  // Number roots with metadata (usually empty) at the beginning, so that the
  // FrameMap::Meta array can be elided.
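  // For example, roots {A(meta), B, C(meta)} end up ordered [A, C, B];
  // GetFrameMap then sets NumMeta to 2, so Meta needs only two entries.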
  Roots.insert(Roots.begin(), MetaRoots.begin(), MetaRoots.end());
}
GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
                         int Idx, int Idx2, const char *Name) {
  Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx2) };
  Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 3, Name);

  assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");

  return dyn_cast<GetElementPtrInst>(Val);
}

GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
                         int Idx, const char *Name) {
  Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx) };
  Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 2, Name);

  assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");

  return dyn_cast<GetElementPtrInst>(Val);
}
/// performCustomLowering - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
  LLVMContext &Context = F.getContext();

  // Find calls to llvm.gcroot.
  CollectRoots(F);

  // If there are no roots in this function, then there is no need to add a
  // stack map entry for it.
  if (Roots.empty())
    return false;

  // Build the constant map and figure the type of the shadow stack entry.
  Value *FrameMap = GetFrameMap(F);
  const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

  // Build the shadow stack entry at the very start of the function.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  IRBuilder<> AtEntry(IP->getParent(), IP);

  Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
                                                 "gc_frame");

  while (isa<AllocaInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Initialize the map pointer and load the current head of the shadow stack.
  Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
  Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, StackEntry,
                                       0, 1, "gc_frame.map");
  AtEntry.CreateStore(FrameMap, EntryMapPtr);

  // After all the allocas...
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    // For each root, find the corresponding slot in the aggregate...
    Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");

    // And use it in lieu of the alloca.
    AllocaInst *OriginalAlloca = Roots[I].second;
    SlotPtr->takeName(OriginalAlloca);
    OriginalAlloca->replaceAllUsesWith(SlotPtr);
  }

  // Move past the original stores inserted by GCStrategy::InitRoots. This
  // isn't really necessary (the collector would never see the intermediate
  // state at runtime), but it's nicer not to push the half-initialized entry
  // onto the shadow stack.
  while (isa<StoreInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Push the entry onto the shadow stack.
  Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
                                        StackEntry, 0, 0, "gc_frame.next");
  Instruction *NewHeadVal = CreateGEP(Context, AtEntry,
                                      StackEntry, 0, "gc_newhead");
  AtEntry.CreateStore(CurrentHead, EntryNextPtr);
  AtEntry.CreateStore(NewHeadVal, Head);
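
  // At this point the entry block looks roughly like the following sketch
  // (types and root initialization elided; the gep index tuples address
  // fields of the concrete frame type):
  //
  //   %gc_frame      = alloca %gc_stackentry.f
  //   %gc_currhead   = load @llvm_gc_root_chain
  //   %gc_frame.map  = gep %gc_frame, 0, 0, 1   ; FrameMap* slot
  //   store @__gc_f, %gc_frame.map
  //   %gc_frame.next = gep %gc_frame, 0, 0, 0   ; Next slot
  //   %gc_newhead    = gep %gc_frame, 0, 0      ; the embedded StackEntry
  //   store %gc_currhead, %gc_frame.next
  //   store %gc_newhead, @llvm_gc_root_chain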
  // For each instruction that escapes...
  EscapeEnumerator EE(F, "gc_cleanup");
  while (IRBuilder<> *AtExit = EE.Next()) {
    // Pop the entry from the shadow stack. Don't reuse CurrentHead from
    // AtEntry, since that would make the value live for the entire function.
    Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
                                           "gc_frame.next");
    Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
    AtExit->CreateStore(SavedHead, Head);
  }

  // Delete the original allocas (which are no longer used) and the intrinsic
  // calls (which are no longer valid). Doing this last avoids invalidating
  // iterators.
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    Roots[I].first->eraseFromParent();
    Roots[I].second->eraseFromParent();
  }

  Roots.clear();
  return true;
}