//===-- MachineFunction.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//

// Out of line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() {}

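// ilist callback invoked when a MachineBasicBlock is erased from the parent
// function's block list; forwards to DeleteMachineBasicBlock for recycling.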
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->DeleteMachineBasicBlock(MBB);
}

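// The constructor sets up the per-function, allocator-backed state: register
// info (if the target provides a TargetRegisterInfo), frame info, constant
// pool, and the function alignment derived from the target lowering.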
MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
                                 unsigned FunctionNum, MachineModuleInfo &mmi,
                                 GCModuleInfo* gmi)
  : Fn(F), Target(TM), Ctx(mmi.getContext()), MMI(mmi), GMI(gmi) {
  if (TM.getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(*TM.getRegisterInfo());
  else
    RegInfo = 0;

  MFInfo = 0;
  FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering(),
                                               TM.Options.RealignStack);

  if (Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                       Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(Fn->getAttributes().
                                getStackAlignment(AttributeSet::FunctionIndex));

  ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout());
  Alignment = TM.getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
  if (!Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                        Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         TM.getTargetLowering()->getPrefFunctionAlignment());

  FunctionNumber = FunctionNum;
  JumpTableInfo = 0;
}

MachineFunction::~MachineFunction() {
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors; they contain std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }
}

/// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it
/// doesn't already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
  if (JumpTableInfo) return JumpTableInfo;

  JumpTableInfo = new (Allocator)
    MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
  return JumpTableInfo;
}

/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
/// recomputes them. This guarantees that the MBB numbers are sequential,
/// dense, and match the ordering of the blocks within the function. If a
/// specific MachineBasicBlock is specified, only that block and those after
/// it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == 0)
    MBBI = begin();
  else
    MBBI = MBB;

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = prior(MBBI)->getNumber()+1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = 0;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered. If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}

/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
///
MachineInstr *
MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                    DebugLoc DL, bool NoImp) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
    MachineInstr(*this, MCID, DL, NoImp);
}

/// CloneMachineInstr - Create a new MachineInstr which is a copy of the
/// 'Orig' instruction, identical in all ways except the instruction
/// has no parent, prev, or next.
///
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
             MachineInstr(*this, *Orig);
}

/// DeleteMachineInstr - Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}

/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
/// instead of `new MachineBasicBlock'.
///
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
  return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
             MachineBasicBlock(*this, bb);
}

/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
///
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}

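/// getMachineMemOperand - Allocate a new MachineMemOperand. MachineMemOperands
/// are owned by the MachineFunction and need not be explicitly deallocated.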
MachineMemOperand *
MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
                                      uint64_t s, unsigned base_alignment,
                                      const MDNode *TBAAInfo,
                                      const MDNode *Ranges) {
  return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
                                           TBAAInfo, Ranges);
}

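/// getMachineMemOperand - Allocate a new MachineMemOperand by copying an
/// existing one, adjusting by an offset and using the given size. The TBAA
/// and range metadata of the original operand are not carried over.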
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, uint64_t Size) {
  return new (Allocator)
             MachineMemOperand(MachinePointerInfo(MMO->getValue(),
                                                  MMO->getOffset()+Offset),
                               MMO->getFlags(), Size,
                               MMO->getBaseAlignment(), 0);
}

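/// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
/// pointers. The array is owned by this MachineFunction's allocator.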
MachineInstr::mmo_iterator
MachineFunction::allocateMemRefsArray(unsigned long Num) {
  return Allocator.Allocate<MachineMemOperand *>(Num);
}

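/// extractLoadMemRefs - Given a range of memory operands, return a new array
/// containing only those that describe loads; store-only operands are dropped
/// and load+store operands are cloned with the store flag cleared.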
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
                                    MachineInstr::mmo_iterator End) {
  // Count the number of load mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isLoad())
      ++Num;

  // Allocate a new array and populate it with the load information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isLoad()) {
      if (!(*I)->isStore())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the store flag.
        MachineMemOperand *JustLoad =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOStore,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getTBAAInfo());
        Result[Index] = JustLoad;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}

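/// extractStoreMemRefs - The store counterpart of extractLoadMemRefs: return a
/// new array containing only the operands that describe stores, cloning any
/// load+store operands with the load flag cleared.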
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
                                     MachineInstr::mmo_iterator End) {
  // Count the number of store mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isStore())
      ++Num;

  // Allocate a new array and populate it with the store information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isStore()) {
      if (!(*I)->isLoad())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the load flag.
        MachineMemOperand *JustStore =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOLoad,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getTBAAInfo());
        Result[Index] = JustStore;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineFunction::dump() const {
  print(dbgs());
}
#endif

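/// getName - Return the name of the corresponding LLVM IR function.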
StringRef MachineFunction::getName() const {
  assert(getFunction() && "No function!");
  return getFunction()->getName();
}

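/// print - Print the machine function to the given stream in a format suitable
/// for debugging, annotating instructions with slot indexes when Indexes is
/// non-null.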
void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  if (RegInfo) {
    OS << (RegInfo->isSSA() ? "SSA" : "Post SSA");
    if (!RegInfo->tracksLiveness())
      OS << ", not tracking liveness";
  }
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getTarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << PrintReg(I->first, TRI);
      if (I->second)
        OS << " in " << PrintReg(I->second, TRI);
      if (llvm::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  for (const_iterator BB = begin(), E = end(); BB != E; ++BB) {
    OS << '\n';
    BB->print(OS, Indexes);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}

namespace llvm {
  template<>
  struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {

    DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

    static std::string getGraphName(const MachineFunction *F) {
      return "CFG for '" + F->getName().str() + "' function";
    }

    std::string getNodeLabel(const MachineBasicBlock *Node,
                             const MachineFunction *Graph) {
      std::string OutStr;
      {
        raw_string_ostream OSS(OutStr);

        if (isSimple()) {
          OSS << "BB#" << Node->getNumber();
          if (const BasicBlock *BB = Node->getBasicBlock())
            OSS << ": " << BB->getName();
        } else
          Node->print(OSS);
      }

      if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

      // Process string output to make it nicer...
      for (unsigned i = 0; i != OutStr.length(); ++i)
        if (OutStr[i] == '\n') { // Left justify
          OutStr[i] = '\\';
          OutStr.insert(OutStr.begin()+i+1, 'l');
        }

      return OutStr;
    }
  };
}

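/// viewCFG - Pop up a Graphviz/gv window showing the CFG of the current
/// function, with the full contents of each basic block. Only available in
/// debug builds.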
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

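/// viewCFGOnly - Like viewCFG, but the graph nodes show only block numbers
/// and names, not the instructions they contain.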
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// addLiveIn - Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  unsigned VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    assert(MRI.getRegClass(VReg) == RC && "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}

/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
  const MCAsmInfo &MAI = *getTarget().getMCAsmInfo();

  const char *Prefix = isLinkerPrivate ? MAI.getLinkerPrivateGlobalPrefix() :
                                         MAI.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
    << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.GetOrCreateSymbol(Name.str());
}

/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
/// base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const MCAsmInfo &MAI = *Target.getMCAsmInfo();
  return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
                               Twine(getFunctionNumber())+"$pb");
}

//===----------------------------------------------------------------------===//
//  MachineFrameInfo implementation
//===----------------------------------------------------------------------===//

/// ensureMaxAlignment - Make sure the function is at least Align bytes
/// aligned.
void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
  if (!TFI.isStackRealignable() || !RealignOption)
    assert(Align <= TFI.getStackAlignment() &&
           "For targets without stack realignment, Align is out of limit!");
  if (MaxAlignment < Align) MaxAlignment = Align;
}

/// clampStackAlignment - Clamp the alignment if requested and emit a warning.
static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
                                           unsigned StackAlign) {
  if (!ShouldClamp || Align <= StackAlign)
    return Align;
  DEBUG(dbgs() << "Warning: requested alignment " << Align
               << " exceeds the stack alignment " << StackAlign
               << " when stack realignment is off" << '\n');
  return StackAlign;
}

/// CreateStackObject - Create a new statically sized stack object, returning
/// a nonnegative identifier to represent it.
///
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
                      bool isSS, bool MayNeedSP, const AllocaInst *Alloca) {
  assert(Size != 0 && "Cannot allocate zero size stack objects!");
  Alignment = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
                                  Alignment, TFI.getStackAlignment());
  Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP,
                                Alloca));
  int Index = (int)Objects.size() - NumFixedObjects - 1;
  assert(Index >= 0 && "Bad frame index!");
  ensureMaxAlignment(Alignment);
  return Index;
}

/// CreateSpillStackObject - Create a new statically sized stack object that
/// represents a spill slot, returning a nonnegative identifier to represent
/// it.
///
int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
                                             unsigned Alignment) {
  Alignment = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
                                  Alignment, TFI.getStackAlignment());
  CreateStackObject(Size, Alignment, true, false);
  int Index = (int)Objects.size() - NumFixedObjects - 1;
  ensureMaxAlignment(Alignment);
  return Index;
}

/// CreateVariableSizedObject - Notify the MachineFrameInfo object that a
/// variable sized object has been created. This must be created whenever a
/// variable sized object is created, whether or not the index returned is
/// actually used.
///
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment) {
  HasVarSizedObjects = true;
  Alignment = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
                                  Alignment, TFI.getStackAlignment());
  Objects.push_back(StackObject(0, Alignment, 0, false, false, true, 0));
  ensureMaxAlignment(Alignment);
  return (int)Objects.size()-NumFixedObjects-1;
}

/// CreateFixedObject - Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
///
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
                                        bool Immutable) {
  assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
  // The alignment of the frame index can be determined from its offset from
  // the incoming frame position. If the frame object is at offset 32 and
  // the stack is guaranteed to be 16-byte aligned, then we know that the
  // object is 16-byte aligned.
  unsigned StackAlign = TFI.getStackAlignment();
  unsigned Align = MinAlign(SPOffset, StackAlign);
  Align = clampStackAlignment(!TFI.isStackRealignable() || !RealignOption,
                              Align, TFI.getStackAlignment());
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
                                              /*isSS*/   false,
                                              /*NeedSP*/ false,
                                              /*Alloca*/ 0));
  return -++NumFixedObjects;
}

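/// getPristineRegs - Return the set of callee-saved registers that are
/// pristine on entry to MBB, i.e. registers that still hold the caller's
/// value because no save for them has been emitted along any path to MBB.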
BitVector
MachineFrameInfo::getPristineRegs(const MachineBasicBlock *MBB) const {
  assert(MBB && "MBB must be valid");
  const MachineFunction *MF = MBB->getParent();
  assert(MF && "MBB must be part of a MachineFunction");
  const TargetMachine &TM = MF->getTarget();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  BitVector BV(TRI->getNumRegs());

  // Before CSI is calculated, no registers are considered pristine. They can
  // be freely used and PEI will make sure they are saved.
  if (!isCalleeSavedInfoValid())
    return BV;

  for (const uint16_t *CSR = TRI->getCalleeSavedRegs(MF); CSR && *CSR; ++CSR)
    BV.set(*CSR);

  // The entry MBB always has all CSRs pristine.
  if (MBB == &MF->front())
    return BV;

  // On other MBBs the saved CSRs are not pristine.
  const std::vector<CalleeSavedInfo> &CSI = getCalleeSavedInfo();
  for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
         E = CSI.end(); I != E; ++I)
    BV.reset(I->getReg());

  return BV;
}

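/// estimateStackSize - Estimate, in bytes, the size of the stack frame this
/// function will need once all frame objects have been laid out.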
unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
  unsigned MaxAlign = getMaxAlignment();
  int Offset = 0;

  // This code is very, very similar to PEI::calculateFrameObjectOffsets().
  // It really should be refactored to share code. Until then, changes
  // should keep in mind that there's tight coupling between the two.

  for (int i = getObjectIndexBegin(); i != 0; ++i) {
    int FixedOff = -getObjectOffset(i);
    if (FixedOff > Offset) Offset = FixedOff;
  }
  for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
    if (isDeadObjectIndex(i))
      continue;
    Offset += getObjectSize(i);
    unsigned Align = getObjectAlignment(i);
    // Adjust to alignment boundary
    Offset = (Offset+Align-1)/Align*Align;

    MaxAlign = std::max(Align, MaxAlign);
  }

  if (adjustsStack() && TFI->hasReservedCallFrame(MF))
    Offset += getMaxCallFrameSize();

  // Round up the size to a multiple of the alignment. If the function has
  // any calls or alloca's, align to the target's StackAlignment value to
  // ensure that the callee's frame or the alloca data is suitably aligned;
  // otherwise, for leaf functions, align to the TransientStackAlignment
  // value.
  unsigned StackAlign;
  if (adjustsStack() || hasVarSizedObjects() ||
      (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
    StackAlign = TFI->getStackAlignment();
  else
    StackAlign = TFI->getTransientStackAlignment();

  // If the frame pointer is eliminated, all frame offsets will be relative to
  // SP not FP. Align to MaxAlign so this works.
  StackAlign = std::max(StackAlign, MaxAlign);
  unsigned AlignMask = StackAlign - 1;
  Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);

  return (unsigned)Offset;
}

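/// print - Print each frame object's size, alignment, and SP-relative location
/// to OS as part of the MachineFunction dump.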
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
  if (Objects.empty()) return;

  const TargetFrameLowering *FI = MF.getTarget().getFrameLowering();
  int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);

  OS << "Frame Objects:\n";

  for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
    const StackObject &SO = Objects[i];
    OS << " fi#" << (int)(i-NumFixedObjects) << ": ";
    if (SO.Size == ~0ULL) {
      OS << "dead\n";
      continue;
    }
    if (SO.Size == 0)
      OS << "variable sized";
    else
      OS << "size=" << SO.Size;
    OS << ", align=" << SO.Alignment;

    if (i < NumFixedObjects)
      OS << ", fixed";
    if (i < NumFixedObjects || SO.SPOffset != -1) {
      int64_t Off = SO.SPOffset - ValOffset;
      OS << ", at location [SP";
      if (Off > 0)
        OS << "+" << Off;
      else if (Off < 0)
        OS << Off;
      OS << "]";
    }
    OS << "\n";
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineFrameInfo::dump(const MachineFunction &MF) const {
  print(MF, dbgs());
}
#endif

//===----------------------------------------------------------------------===//
//  MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//

/// getEntrySize - Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// getEntryAlignment - Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return TD.getABIIntegerTypeAlignment(64);
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32);
  case MachineJumpTableInfo::EK_Inline:
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// createJumpTableIndex - Create a new jump table entry in the jump table info.
///
unsigned MachineJumpTableInfo::createJumpTableIndex(
                               const std::vector<MachineBasicBlock*> &DestBBs) {
  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
  return JumpTables.size()-1;
}

/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
/// the jump tables to branch to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
                                                  MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
  return MadeChange;
}

/// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
/// the jump table to branch to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
    if (JTE.MBBs[j] == Old) {
      JTE.MBBs[j] = New;
      MadeChange = true;
    }
  return MadeChange;
}

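/// print - Print each jump table and the basic blocks it branches to.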
void MachineJumpTableInfo::print(raw_ostream &OS) const {
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << " jt#" << i << ": ";
    for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
      OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
  }

  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif

//===----------------------------------------------------------------------===//
//  MachineConstantPool implementation
//===----------------------------------------------------------------------===//

void MachineConstantPoolValue::anchor() { }

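/// getType - Return the type of the constant this entry holds, handling both
/// plain Constants and target-specific MachineConstantPoolValues.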
Type *MachineConstantPoolEntry::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

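/// getRelocationInfo - Return whether this entry requires relocations, as
/// reported by the underlying constant or MachineConstantPoolValue.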
unsigned MachineConstantPoolEntry::getRelocationInfo() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getRelocationInfo();
  return Val.ConstVal->getRelocationInfo();
}

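// The constant pool owns any MachineConstantPoolValues it contains; delete
// them here, including values that ended up sharing an existing entry.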
MachineConstantPool::~MachineConstantPool() {
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (Constants[i].isMachineConstantPoolEntry())
      delete Constants[i].Val.MachineCPVal;
  for (DenseSet<MachineConstantPoolValue*>::iterator I =
       MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
       I != E; ++I)
    delete *I;
}

/// CanShareConstantPoolEntry - Test whether the given two constants
/// can be allocated the same constant pool entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout *TD) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them.
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
    return false;

  // For now, only support constants with the same size.
  uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
  if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
      StoreSize > 128)
    return false;

  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer. If we
  // get two identical ConstantInt's, then we are good to share them. We use
  // the constant folding APIs to do this so that we get the benefit of
  // DataLayout.
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
                                 const_cast<Constant*>(A), TD);
  else if (A->getType() != IntTy)
    A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
                                 const_cast<Constant*>(A), TD);
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
                                 const_cast<Constant*>(B), TD);
  else if (B->getType() != IntTy)
    B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
                                 const_cast<Constant*>(B), TD);

  return A == B;
}

/// getConstantPoolIndex - Create a new entry in the constant pool or return
/// an existing one. User must specify the log2 of the minimum required
/// alignment for the object.
///
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
                                                   unsigned Alignment) {
  assert(Alignment && "Alignment must be specified!");
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (!Constants[i].isMachineConstantPoolEntry() &&
        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, TD)) {
      if ((unsigned)Constants[i].getAlignment() < Alignment)
        Constants[i].Alignment = Alignment;
      return i;
    }

  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
  return Constants.size()-1;
}

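/// getConstantPoolIndex - Create a new entry in the constant pool for a
/// target-specific MachineConstantPoolValue, or return the index of an
/// existing entry that the value reports it can share.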
unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
                                                   unsigned Alignment) {
  assert(Alignment && "Alignment must be specified!");
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  int Idx = V->getExistingMachineCPValue(this, Alignment);
  if (Idx != -1) {
    MachineCPVsSharingEntries.insert(V);
    return (unsigned)Idx;
  }

  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
  return Constants.size()-1;
}

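/// print - Print each constant pool entry along with its alignment.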
void MachineConstantPool::print(raw_ostream &OS) const {
  if (Constants.empty()) return;

  OS << "Constant Pool:\n";
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    OS << " cp#" << i << ": ";
    if (Constants[i].isMachineConstantPoolEntry())
      Constants[i].Val.MachineCPVal->print(OS);
    else
      OS << *(const Value*)Constants[i].Val.ConstVal;
    OS << ", align=" << Constants[i].getAlignment();
    OS << "\n";
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineConstantPool::dump() const { print(dbgs()); }
#endif