//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
  // No stack is used.
  StackOffset = 0;

  clearByValRegsInfo();
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, int MinSize,
                          int MinAlignment, ISD::ArgFlagsTy ArgFlags) {
  llvm::Align MinAlign(MinAlignment);
  llvm::Align Align(ArgFlags.getByValAlign());
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > Align)
    Align = MinAlign;
  ensureMaxAlignment(Align);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align.value());
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Align.value());
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI/32] |= 1 << (*AI&31);
}

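/// Return true if Reg was only shadow-allocated: it is marked as used (for
/// example because it overlaps a register that was assigned), but no value
/// location in Locs actually lives in Reg or in a register aliasing it.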
bool CCState::IsShadowAllocatedReg(unsigned Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs) {
    if (ValAssign.isRegLoc()) {
      for (MCRegAliasIterator AI(ValAssign.getLocReg(), &TRI, true);
           AI.isValid(); ++AI) {
        if (*AI == Reg)
          return false;
      }
    }
  }
  return true;
}

/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function argument #" + Twine(i));
  }
}

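// Usage note (a minimal sketch, not part of this file): a target's
// LowerFormalArguments typically drives AnalyzeFormalArguments roughly like
// this, where CC_MyTarget stands in for a hypothetical TableGen-generated
// assignment function:
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);
//   for (CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc()) {
//       // Copy the incoming value out of VA.getLocReg().
//     } else {
//       // Load the incoming value from stack offset VA.getLocMemOffset().
//     }
//   }
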
/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function return #" + Twine(i));
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}

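/// Conservatively return true if values of the given type may be passed in
/// registers under the given calling convention; used below to decide whether
/// to set the 'inreg' flag when probing for remaining register parameters.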
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
    return true;
  return false;
}

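/// Compute the set of registers that could still be used to pass values of the
/// given type under the current calling convention. The probe assignments are
/// rolled back afterwards, but the probed registers stay marked as allocated.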
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  llvm::Align SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm = true;
  while (HaveRegParm) {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  }

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, i.e.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.resize(NumLocs);
}

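/// For a musttail call, compute the registers a variadic callee might receive
/// arguments in and add each as a function live-in, recording it in Forwards
/// so its value can be forwarded to the tail call.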
void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
  SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      unsigned VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}

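/// Return true if the return values of a call are compatible between the
/// callee and caller calling conventions, i.e. both conventions assign every
/// result to the same register or the same stack offset.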
bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;

  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  if (RVLocs1.size() != RVLocs2.size())
    return false;
  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
    const CCValAssign &Loc1 = RVLocs1[I];
    const CCValAssign &Loc2 = RVLocs2[I];

    if (Loc1.getLocInfo() != Loc2.getLocInfo())
      return false;

    bool RegLoc1 = Loc1.isRegLoc();
    if (RegLoc1 != Loc2.isRegLoc())
      return false;

    if (RegLoc1) {
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;
    } else {
      if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
        return false;
    }
  }
  return true;
}
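
// Usage note (a minimal sketch, not part of this file): targets commonly
// consult resultsCompatible from their tail-call eligibility check, roughly
// like so, where RetCC_MyTarget stands in for a hypothetical TableGen-generated
// return-value assignment function:
//
//   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
//                                   RetCC_MyTarget, RetCC_MyTarget))
//     return false; // Results would land in different locations; no tail call.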