|
@@ -457,6 +457,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
|
|
|
setOperationAction(ISD::SETCC , MVT::i64 , Custom);
|
|
|
}
|
|
|
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
|
|
|
+ // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
|
|
|
+ // SjLj exception handling but a light-weight setjmp/longjmp replacement to
|
|
|
+ // support continuation, user-level threading, etc. As a result, no
|
|
|
+ // other SjLj exception interfaces are implemented; please don't build
|
|
|
+ // your own exception handling based on them.
|
|
|
+ // LLVM/Clang supports zero-cost DWARF exception handling.
|
|
|
+ setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
|
|
|
+ setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
|
|
|
|
|
|
// Darwin ABI issue.
|
|
|
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
|
|
@@ -10351,6 +10359,21 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
|
|
|
Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
|
|
|
}
|
|
|
|
|
|
+SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
|
|
|
+ SelectionDAG &DAG) const {
|
|
|
+ DebugLoc DL = Op.getDebugLoc();
|
|
|
+ return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
|
|
|
+ DAG.getVTList(MVT::i32, MVT::Other),
|
|
|
+ Op.getOperand(0), Op.getOperand(1));
|
|
|
+}
|
|
|
+
|
|
|
+SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
|
|
|
+ SelectionDAG &DAG) const {
|
|
|
+ DebugLoc DL = Op.getDebugLoc();
|
|
|
+ return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
|
|
|
+ Op.getOperand(0), Op.getOperand(1));
|
|
|
+}
|
|
|
+
|
|
|
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
|
|
|
return Op.getOperand(0);
|
|
|
}
|
|
@@ -11375,6 +11398,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
|
|
|
return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
|
|
|
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
|
|
|
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
|
|
|
+ case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
|
|
|
+ case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
|
|
|
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
|
|
|
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
|
|
|
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
|
|
@@ -11667,6 +11692,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
|
|
|
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
|
|
|
case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
|
|
|
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
|
|
|
+ case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
|
|
|
+ case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
|
|
|
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
|
|
|
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
|
|
|
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
|
|
@@ -13212,6 +13239,173 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
|
|
|
return BB;
|
|
|
}
|
|
|
|
|
|
+MachineBasicBlock *
|
|
|
+X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
|
|
|
+ MachineBasicBlock *MBB) const {
|
|
|
+ DebugLoc DL = MI->getDebugLoc();
|
|
|
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
|
|
|
+
|
|
|
+ MachineFunction *MF = MBB->getParent();
|
|
|
+ MachineRegisterInfo &MRI = MF->getRegInfo();
|
|
|
+
|
|
|
+ const BasicBlock *BB = MBB->getBasicBlock();
|
|
|
+ MachineFunction::iterator I = MBB;
|
|
|
+ ++I;
|
|
|
+
|
|
|
+ // Memory Reference
|
|
|
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
|
|
|
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
|
|
|
+
|
|
|
+ unsigned DstReg;
|
|
|
+ unsigned MemOpndSlot = 0;
|
|
|
+
|
|
|
+ unsigned CurOp = 0;
|
|
|
+
|
|
|
+ DstReg = MI->getOperand(CurOp++).getReg();
|
|
|
+ const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
|
|
|
+ assert(RC->hasType(MVT::i32) && "Invalid destination!");
|
|
|
+ unsigned mainDstReg = MRI.createVirtualRegister(RC);
|
|
|
+ unsigned restoreDstReg = MRI.createVirtualRegister(RC);
|
|
|
+
|
|
|
+ MemOpndSlot = CurOp;
|
|
|
+
|
|
|
+ MVT PVT = getPointerTy();
|
|
|
+ assert((PVT == MVT::i64 || PVT == MVT::i32) &&
|
|
|
+ "Invalid Pointer Size!");
|
|
|
+
|
|
|
+ // For v = setjmp(buf), we generate
|
|
|
+ //
|
|
|
+ // thisMBB:
|
|
|
+ // buf[Label_Offset] = restoreMBB
|
|
|
+ // SjLjSetup restoreMBB
|
|
|
+ //
|
|
|
+ // mainMBB:
|
|
|
+ // v_main = 0
|
|
|
+ //
|
|
|
+ // sinkMBB:
|
|
|
+ // v = phi(main, restore)
|
|
|
+ //
|
|
|
+ // restoreMBB:
|
|
|
+ // v_restore = 1
|
|
|
+
|
|
|
+ MachineBasicBlock *thisMBB = MBB;
|
|
|
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
|
|
|
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
|
|
|
+ MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
|
|
|
+ MF->insert(I, mainMBB);
|
|
|
+ MF->insert(I, sinkMBB);
|
|
|
+ MF->push_back(restoreMBB);
|
|
|
+
|
|
|
+ MachineInstrBuilder MIB;
|
|
|
+
|
|
|
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
|
|
|
+ sinkMBB->splice(sinkMBB->begin(), MBB,
|
|
|
+ llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
|
|
|
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
|
|
|
+
|
|
|
+ // thisMBB:
|
|
|
+ unsigned PtrImmStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
|
|
|
+ const int64_t Label_Offset = 1 * PVT.getStoreSize();
|
|
|
+
|
|
|
+ // Store IP
|
|
|
+ MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrImmStoreOpc));
|
|
|
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
|
|
|
+ if (i == X86::AddrDisp)
|
|
|
+ MIB.addDisp(MI->getOperand(MemOpndSlot + i), Label_Offset);
|
|
|
+ else
|
|
|
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
|
|
|
+ }
|
|
|
+ MIB.addMBB(restoreMBB);
|
|
|
+ MIB.setMemRefs(MMOBegin, MMOEnd);
|
|
|
+ // Setup
|
|
|
+ MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
|
|
|
+ .addMBB(restoreMBB);
|
|
|
+ MIB.addRegMask(RegInfo->getNoPreservedMask());
|
|
|
+ thisMBB->addSuccessor(mainMBB);
|
|
|
+ thisMBB->addSuccessor(restoreMBB);
|
|
|
+
|
|
|
+ // mainMBB:
|
|
|
+ // mainDstReg = 0 (MOV32r0)
|
|
|
+ BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
|
|
|
+ mainMBB->addSuccessor(sinkMBB);
|
|
|
+
|
|
|
+ // sinkMBB:
|
|
|
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
|
|
|
+ TII->get(X86::PHI), DstReg)
|
|
|
+ .addReg(mainDstReg).addMBB(mainMBB)
|
|
|
+ .addReg(restoreDstReg).addMBB(restoreMBB);
|
|
|
+
|
|
|
+ // restoreMBB:
|
|
|
+ BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
|
|
|
+ BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB);
|
|
|
+ restoreMBB->addSuccessor(sinkMBB);
|
|
|
+
|
|
|
+ MI->eraseFromParent();
|
|
|
+ return sinkMBB;
|
|
|
+}
|
|
|
+
|
|
|
+MachineBasicBlock *
|
|
|
+X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
|
|
|
+ MachineBasicBlock *MBB) const {
|
|
|
+ DebugLoc DL = MI->getDebugLoc();
|
|
|
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
|
|
|
+
|
|
|
+ MachineFunction *MF = MBB->getParent();
|
|
|
+ MachineRegisterInfo &MRI = MF->getRegInfo();
|
|
|
+
|
|
|
+ // Memory Reference
|
|
|
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
|
|
|
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
|
|
|
+
|
|
|
+ MVT PVT = getPointerTy();
|
|
|
+ assert((PVT == MVT::i64 || PVT == MVT::i32) &&
|
|
|
+ "Invalid Pointer Size!");
|
|
|
+
|
|
|
+ const TargetRegisterClass *RC =
|
|
|
+ (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
|
|
|
+ unsigned Tmp = MRI.createVirtualRegister(RC);
|
|
|
+ // Since FP is only updated here but NOT referenced, it's treated as GPR.
|
|
|
+ unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
|
|
|
+ unsigned SP = RegInfo->getStackRegister();
|
|
|
+
|
|
|
+ MachineInstrBuilder MIB;
|
|
|
+
|
|
|
+ const int64_t Label_Offset = 1 * PVT.getStoreSize();
|
|
|
+ const int64_t SP_Offset = 2 * PVT.getStoreSize();
|
|
|
+
|
|
|
+ unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
|
|
|
+ unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
|
|
|
+
|
|
|
+ // Reload FP
|
|
|
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
|
|
|
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
|
|
|
+ MIB.addOperand(MI->getOperand(i));
|
|
|
+ MIB.setMemRefs(MMOBegin, MMOEnd);
|
|
|
+ // Reload IP
|
|
|
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
|
|
|
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
|
|
|
+ if (i == X86::AddrDisp)
|
|
|
+ MIB.addDisp(MI->getOperand(i), Label_Offset);
|
|
|
+ else
|
|
|
+ MIB.addOperand(MI->getOperand(i));
|
|
|
+ }
|
|
|
+ MIB.setMemRefs(MMOBegin, MMOEnd);
|
|
|
+ // Reload SP
|
|
|
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
|
|
|
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
|
|
|
+ if (i == X86::AddrDisp)
|
|
|
+ MIB.addDisp(MI->getOperand(i), SP_Offset);
|
|
|
+ else
|
|
|
+ MIB.addOperand(MI->getOperand(i));
|
|
|
+ }
|
|
|
+ MIB.setMemRefs(MMOBegin, MMOEnd);
|
|
|
+ // Jump
|
|
|
+ BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
|
|
|
+
|
|
|
+ MI->eraseFromParent();
|
|
|
+ return MBB;
|
|
|
+}
|
|
|
+
|
|
|
MachineBasicBlock *
|
|
|
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
|
|
|
MachineBasicBlock *BB) const {
|
|
@@ -13427,6 +13621,14 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
|
|
|
|
|
|
case X86::VAARG_64:
|
|
|
return EmitVAARG64WithCustomInserter(MI, BB);
|
|
|
+
|
|
|
+ case X86::EH_SjLj_SetJmp32:
|
|
|
+ case X86::EH_SjLj_SetJmp64:
|
|
|
+ return emitEHSjLjSetJmp(MI, BB);
|
|
|
+
|
|
|
+ case X86::EH_SjLj_LongJmp32:
|
|
|
+ case X86::EH_SjLj_LongJmp64:
|
|
|
+ return emitEHSjLjLongJmp(MI, BB);
|
|
|
}
|
|
|
}
|
|
|
|