//===-- ShadowStackGC.cpp - GC support for uncooperative targets ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering for the llvm.gc* intrinsics for targets that do
// not natively support them (which includes the C backend). Note that the code
// generated is not quite as efficient as algorithms which generate stack maps
// to identify roots.
//
// This pass implements the code transformation described in this paper:
//   "Accurate Garbage Collection in an Uncooperative Environment"
//   Fergus Henderson, ISMM, 2002
//
// runtime/GC/SemiSpace.cpp is a prototype runtime compatible with
// ShadowStackGC.
//
// In order to support this particular transformation, all stack roots are
// coallocated in the stack. This allows a fully target-independent stack map
// while introducing only minor runtime overhead.
//
//===----------------------------------------------------------------------===//

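// For orientation, a collector that consumes this shadow stack can walk all
// live roots with a loop like the one below. This is only a sketch modeled on
// the prototype in runtime/GC/SemiSpace.cpp; 'visitGCRoots' and 'VisitRoot'
// are illustrative names, and the struct fields follow the FrameMap and
// StackEntry layouts built in initializeCustomLowering:
//
//   void visitGCRoots(void (*VisitRoot)(void **Root, const void *Meta)) {
//     for (StackEntry *R = llvm_gc_root_chain; R; R = R->Next) {
//       unsigned i = 0;
//       // Roots with metadata are numbered first (see CollectRoots).
//       for (unsigned e = R->Map->NumMeta; i != e; ++i)
//         VisitRoot(&R->Roots[i], R->Map->Meta[i]);
//       // The remaining roots have null metadata.
//       for (unsigned e = R->Map->NumRoots; i != e; ++i)
//         VisitRoot(&R->Roots[i], 0);
//     }
//   }
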
#define DEBUG_TYPE "shadowstackgc"
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

namespace {

  class ShadowStackGC : public GCStrategy {
    /// Head - This is the global linked-list that contains the chain of GC
    /// roots.
    GlobalVariable *Head;

    /// StackEntryTy - Abstract type of a link in the shadow stack.
    ///
    const StructType *StackEntryTy;

    /// Roots - GC roots in the current function. Each is a pair of the
    /// intrinsic call and its corresponding alloca.
    std::vector<std::pair<CallInst*,AllocaInst*> > Roots;

  public:
    ShadowStackGC();

    bool initializeCustomLowering(Module &M);
    bool performCustomLowering(Function &F);

  private:
    bool IsNullValue(Value *V);
    Constant *GetFrameMap(Function &F);
    const Type* GetConcreteStackEntryType(Function &F);
    void CollectRoots(Function &F);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, const char *Name);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, int Idx2, const char *Name);
  };

}

static GCRegistry::Add<ShadowStackGC>
X("shadow-stack", "Very portable GC for uncooperative code generators");

namespace {
  /// EscapeEnumerator - This is a little algorithm to find all escape points
  /// from a function so that "finally"-style code can be inserted. In addition
  /// to finding the existing return and unwind instructions, it also (if
  /// necessary) transforms any call instructions into invokes and sends them to
  /// a landing pad.
  ///
  /// It's wrapped up in a state machine using the same transform C# uses for
  /// 'yield return' enumerators. This transform allows it to be non-allocating.
  class EscapeEnumerator {
    Function &F;
    const char *CleanupBBName;

    // State.
    int State;
    Function::iterator StateBB, StateE;
    IRBuilder<> Builder;

  public:
    EscapeEnumerator(Function &F, const char *N = "cleanup")
      : F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}

    IRBuilder<> *Next() {
      switch (State) {
      default:
        return 0;

      case 0:
        StateBB = F.begin();
        StateE = F.end();
        State = 1;
        // FALL THROUGH

      case 1:
        // Find all 'return' and 'unwind' instructions.
        while (StateBB != StateE) {
          BasicBlock *CurBB = StateBB++;

          // Branches and invokes do not escape, only unwind and return do.
          TerminatorInst *TI = CurBB->getTerminator();
          if (!isa<UnwindInst>(TI) && !isa<ReturnInst>(TI))
            continue;

          Builder.SetInsertPoint(TI->getParent(), TI);
          return &Builder;
        }

        State = 2;

        // Find all 'call' instructions.
        SmallVector<Instruction*,16> Calls;
        for (Function::iterator BB = F.begin(),
                                E = F.end(); BB != E; ++BB)
          for (BasicBlock::iterator II = BB->begin(),
                                    EE = BB->end(); II != EE; ++II)
            if (CallInst *CI = dyn_cast<CallInst>(II))
              if (!CI->getCalledFunction() ||
                  !CI->getCalledFunction()->getIntrinsicID())
                Calls.push_back(CI);

        if (Calls.empty())
          return 0;

        // Create a cleanup block.
        BasicBlock *CleanupBB = BasicBlock::Create(F.getContext(),
                                                   CleanupBBName, &F);
        UnwindInst *UI = new UnwindInst(F.getContext(), CleanupBB);

        // Transform the 'call' instructions into 'invoke's branching to the
        // cleanup block. Go in reverse order to make prettier BB names.
        SmallVector<Value*,16> Args;
        for (unsigned I = Calls.size(); I != 0; ) {
          CallInst *CI = cast<CallInst>(Calls[--I]);

          // Split the basic block containing the function call.
          BasicBlock *CallBB = CI->getParent();
          BasicBlock *NewBB =
            CallBB->splitBasicBlock(CI, CallBB->getName() + ".cont");

          // Remove the unconditional branch inserted at the end of CallBB.
          CallBB->getInstList().pop_back();
          NewBB->getInstList().remove(CI);

          // Create a new invoke instruction.
          Args.clear();
          Args.append(CI->op_begin(), CI->op_end() - 1);

          InvokeInst *II = InvokeInst::Create(CI->getCalledValue(),
                                              NewBB, CleanupBB,
                                              Args.begin(), Args.end(),
                                              CI->getName(), CallBB);
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());
          CI->replaceAllUsesWith(II);
          delete CI;
        }

        Builder.SetInsertPoint(UI->getParent(), UI);
        return &Builder;
      }
    }
  };
}

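// Typical use of the enumerator (a sketch; performCustomLowering below does
// exactly this to pop the shadow stack entry at every escape point):
//
//   EscapeEnumerator EE(F, "gc_cleanup");
//   while (IRBuilder<> *AtExit = EE.Next()) {
//     // Emit "finally"-style cleanup code through *AtExit here.
//   }
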
// -----------------------------------------------------------------------------

void llvm::linkShadowStackGC() { }

ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
  InitRoots = true;
  CustomRoots = true;
}

Constant *ShadowStackGC::GetFrameMap(Function &F) {
  // doInitialization creates the abstract type of this value.
  const Type *VoidPtr = Type::getInt8PtrTy(F.getContext());

  // Truncate the ShadowStackDescriptor if some metadata is null.
  unsigned NumMeta = 0;
  SmallVector<Constant*,16> Metadata;
  for (unsigned I = 0; I != Roots.size(); ++I) {
    Constant *C = cast<Constant>(Roots[I].first->getOperand(1));
    if (!C->isNullValue())
      NumMeta = I + 1;
    Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
  }

  Constant *BaseElts[] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), Roots.size(), false),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), NumMeta, false),
  };

  Constant *DescriptorElts[] = {
    ConstantStruct::get(F.getContext(), BaseElts, 2, false),
    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta),
                       Metadata.begin(), NumMeta)
  };

  Constant *FrameMap = ConstantStruct::get(F.getContext(), DescriptorElts, 2,
                                           false);

  std::string TypeName("gc_map.");
  TypeName += utostr(NumMeta);
  F.getParent()->addTypeName(TypeName, FrameMap->getType());

  // FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
  //        that, short of multithreaded LLVM, it should be safe; all that is
  //        necessary is that a simple Module::iterator loop not be invalidated.
  //        Appending to the GlobalVariable list is safe in that sense.
  //
  //        All of the output passes emit globals last. The ExecutionEngine
  //        explicitly supports adding globals to the module after
  //        initialization.
  //
  //        Still, if it isn't deemed acceptable, then this transformation needs
  //        to be a ModulePass (which means it cannot be in the 'llc' pipeline
  //        (which uses a FunctionPassManager (which segfaults (not asserts) if
  //        provided a ModulePass))).
  Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
                                    GlobalVariable::InternalLinkage,
                                    FrameMap, "__gc_" + F.getName());

  Constant *GEPIndices[2] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)
  };
  return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}

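// For a function 'foo' with two roots and no metadata, the frame map constant
// built above looks roughly like this (a sketch; the exact struct type name
// recorded by addTypeName depends on NumMeta):
//
//   @__gc_foo = internal constant { { i32, i32 }, [0 x i8*] }
//                 { { i32, i32 } { i32 2, i32 0 }, [0 x i8*] zeroinitializer }
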
const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
  // doInitialization creates the generic version of this type.
  std::vector<const Type*> EltTys;
  EltTys.push_back(StackEntryTy);
  for (size_t I = 0; I != Roots.size(); I++)
    EltTys.push_back(Roots[I].second->getAllocatedType());
  Type *Ty = StructType::get(F.getContext(), EltTys);

  std::string TypeName("gc_stackentry.");
  TypeName += F.getName();
  F.getParent()->addTypeName(TypeName, Ty);

  return Ty;
}

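// For a function 'foo' with two i8* roots, the concrete entry type is roughly
// (a sketch): the abstract link header followed by one in-place slot per root.
//
//   %gc_stackentry.foo = type { %gc_stackentry, i8*, i8* }
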
/// doInitialization - If this module uses the GC intrinsics, find them now. If
/// not, exit fast.
bool ShadowStackGC::initializeCustomLowering(Module &M) {
  // struct FrameMap {
  //   int32_t NumRoots; // Number of roots in stack frame.
  //   int32_t NumMeta;  // Number of metadata descriptors. May be < NumRoots.
  //   void *Meta[];     // May be absent for roots without metadata.
  // };
  std::vector<const Type*> EltTys;
  // 32 bits is ok up to a 32GB stack frame. :)
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  // Specifies length of variable length array.
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  StructType *FrameMapTy = StructType::get(M.getContext(), EltTys);
  M.addTypeName("gc_map", FrameMapTy);
  PointerType *FrameMapPtrTy = PointerType::getUnqual(FrameMapTy);

  // struct StackEntry {
  //   ShadowStackEntry *Next; // Caller's stack entry.
  //   FrameMap *Map;          // Pointer to constant FrameMap.
  //   void *Roots[];          // Stack roots (in-place array, so we pretend).
  // };
  OpaqueType *RecursiveTy = OpaqueType::get(M.getContext());

  EltTys.clear();
  EltTys.push_back(PointerType::getUnqual(RecursiveTy));
  EltTys.push_back(FrameMapPtrTy);
  PATypeHolder LinkTyH = StructType::get(M.getContext(), EltTys);

  RecursiveTy->refineAbstractTypeTo(LinkTyH.get());
  StackEntryTy = cast<StructType>(LinkTyH.get());
  const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
  M.addTypeName("gc_stackentry", LinkTyH.get());  // FIXME: Is this safe from
                                                  //        a FunctionPass?

  // Get the root chain if it already exists.
  Head = M.getGlobalVariable("llvm_gc_root_chain");
  if (!Head) {
    // If the root chain does not exist, insert a new one with linkonce
    // linkage!
    Head = new GlobalVariable(M, StackEntryPtrTy, false,
                              GlobalValue::LinkOnceAnyLinkage,
                              Constant::getNullValue(StackEntryPtrTy),
                              "llvm_gc_root_chain");
  } else if (Head->hasExternalLinkage() && Head->isDeclaration()) {
    Head->setInitializer(Constant::getNullValue(StackEntryPtrTy));
    Head->setLinkage(GlobalValue::LinkOnceAnyLinkage);
  }

  return true;
}

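// After initialization, the module contains roughly the following (a sketch;
// the type names come from the addTypeName calls above, and "gc_map" names
// only the fixed header, without the variable-length Meta[] tail):
//
//   %gc_map = type { i32, i32 }
//   %gc_stackentry = type { %gc_stackentry*, %gc_map* }
//   @llvm_gc_root_chain = linkonce global %gc_stackentry* null
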
bool ShadowStackGC::IsNullValue(Value *V) {
  if (Constant *C = dyn_cast<Constant>(V))
    return C->isNullValue();
  return false;
}

void ShadowStackGC::CollectRoots(Function &F) {
  // FIXME: Account for original alignment. Could fragment the root array.
  //   Approach 1: Null initialize empty slots at runtime. Yuck.
  //   Approach 2: Emit a map of the array instead of just a count.

  assert(Roots.empty() && "Not cleaned up?");

  SmallVector<std::pair<CallInst*, AllocaInst*>,16> MetaRoots;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::gcroot) {
            std::pair<CallInst*, AllocaInst*> Pair = std::make_pair(
              CI, cast<AllocaInst>(CI->getOperand(0)->stripPointerCasts()));
            if (IsNullValue(CI->getOperand(1)))
              Roots.push_back(Pair);
            else
              MetaRoots.push_back(Pair);
          }

  // Number roots with metadata (usually empty) at the beginning, so that the
  // FrameMap::Meta array can be elided.
  Roots.insert(Roots.begin(), MetaRoots.begin(), MetaRoots.end());
}

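// CollectRoots matches IR of the following shape (a sketch; a non-null second
// operand to llvm.gcroot carries the metadata pointer recorded in the frame
// map):
//
//   %root = alloca i8*
//   call void @llvm.gcroot(i8** %root, i8* null)
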
GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
                         int Idx, int Idx2, const char *Name) {
  Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx2) };
  Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 3, Name);

  assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");

  return dyn_cast<GetElementPtrInst>(Val);
}

GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
                         int Idx, const char *Name) {
  Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx) };
  Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 2, Name);

  assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");

  return dyn_cast<GetElementPtrInst>(Val);
}

/// runOnFunction - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
  LLVMContext &Context = F.getContext();

  // Find calls to llvm.gcroot.
  CollectRoots(F);

  // If there are no roots in this function, then there is no need to add a
  // stack map entry for it.
  if (Roots.empty())
    return false;

  // Build the constant map and figure the type of the shadow stack entry.
  Value *FrameMap = GetFrameMap(F);
  const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

  // Build the shadow stack entry at the very start of the function.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  IRBuilder<> AtEntry(IP->getParent(), IP);

  Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
                                                 "gc_frame");

  while (isa<AllocaInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Initialize the map pointer and load the current head of the shadow stack.
  Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
  Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, StackEntry,
                                       0, 1, "gc_frame.map");
  AtEntry.CreateStore(FrameMap, EntryMapPtr);

  // After all the allocas...
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    // For each root, find the corresponding slot in the aggregate...
    Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");

    // And use it in lieu of the alloca.
    AllocaInst *OriginalAlloca = Roots[I].second;
    SlotPtr->takeName(OriginalAlloca);
    OriginalAlloca->replaceAllUsesWith(SlotPtr);
  }

  // Move past the original stores inserted by GCStrategy::InitRoots. This isn't
  // really necessary (the collector would never see the intermediate state at
  // runtime), but it's nicer not to push the half-initialized entry onto the
  // shadow stack.
  while (isa<StoreInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Push the entry onto the shadow stack.
  Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
                                        StackEntry, 0, 0, "gc_frame.next");
  Instruction *NewHeadVal = CreateGEP(Context, AtEntry,
                                      StackEntry, 0, "gc_newhead");
  AtEntry.CreateStore(CurrentHead, EntryNextPtr);
  AtEntry.CreateStore(NewHeadVal, Head);

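  // The emitted prologue is roughly the following for a function 'foo' (a
  // sketch using the old typed-pointer IR syntax; value names follow the
  // CreateLoad/CreateGEP calls above):
  //
  //   %gc_frame = alloca %gc_stackentry.foo
  //   %gc_currhead = load %gc_stackentry** @llvm_gc_root_chain
  //   store %gc_map.0* @__gc_foo, %gc_map.0** %gc_frame.map
  //   ...root slot initialization...
  //   store %gc_stackentry* %gc_currhead, %gc_stackentry** %gc_frame.next
  //   store %gc_stackentry* %gc_newhead, %gc_stackentry** @llvm_gc_root_chain
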
  // For each instruction that escapes...
  EscapeEnumerator EE(F, "gc_cleanup");
  while (IRBuilder<> *AtExit = EE.Next()) {
    // Pop the entry from the shadow stack. Don't reuse CurrentHead from
    // AtEntry, since that would make the value live for the entire function.
    Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
                                           "gc_frame.next");
    Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
    AtExit->CreateStore(SavedHead, Head);
  }

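  // Each escape point thus restores the caller's chain head (a sketch):
  //
  //   %gc_savedhead = load %gc_stackentry** %gc_frame.next
  //   store %gc_stackentry* %gc_savedhead, %gc_stackentry** @llvm_gc_root_chain
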
  // Delete the original allocas (which are no longer used) and the intrinsic
  // calls (which are no longer valid). Doing this last avoids invalidating
  // iterators.
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    Roots[I].first->eraseFromParent();
    Roots[I].second->eraseFromParent();
  }

  Roots.clear();
  return true;
}