@@ -178,24 +178,24 @@ class ARMFastISel : public FastISel {
     bool isLoadTypeLegal(Type *Ty, MVT &VT);
     bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                     bool isZExt);
-    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                      unsigned Alignment = 0, bool isZExt = true,
                      bool allocReg = true);
-    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
+    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                       unsigned Alignment = 0);
     bool ARMComputeAddress(const Value *Obj, Address &Addr);
     void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
     bool ARMIsMemCpySmall(uint64_t Len);
     bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                                unsigned Alignment);
-    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
-    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
-    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
-    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
-    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
-    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
+    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
+    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
+    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
+    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
+    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
+    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
     unsigned ARMSelectCallOp(bool UseReg);
-    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);
+    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

     // Call handling routines.
   private:
@@ -487,7 +487,7 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,

 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
 // checks from the various callers.
-unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
+unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
   if (VT == MVT::f64) return 0;

   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
@@ -497,7 +497,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
   return MoveReg;
 }

-unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
+unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
   if (VT == MVT::i64) return 0;

   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
@@ -510,7 +510,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
 // For double width floating point we need to materialize two constants
 // (the high and the low) into integer registers then use a move to get
 // the combined constant into an FP reg.
-unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
+unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
   const APFloat Val = CFP->getValueAPF();
   bool is64bit = VT == MVT::f64;

@@ -554,7 +554,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
   return DestReg;
 }

-unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
+unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

   if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
     return false;
@@ -616,7 +616,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
   return DestReg;
 }

-unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
+unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
   // For now 32-bit only.
   if (VT != MVT::i32) return 0;

@@ -719,10 +719,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
 }

 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
-  EVT VT = TLI.getValueType(C->getType(), true);
-
-  // Only handle simple types.
-  if (!VT.isSimple()) return 0;
+  MVT VT = TLI.getSimpleValueType(C->getType(), true);

   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
     return ARMMaterializeFP(CFP, VT);
@@ -1003,14 +1000,13 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
   AddOptionalDefs(MIB);
 }

-bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                               unsigned Alignment, bool isZExt, bool allocReg) {
-  assert(VT.isSimple() && "Non-simple types are invalid here!");
   unsigned Opc;
   bool useAM3 = false;
   bool needVMOV = false;
   const TargetRegisterClass *RC;
-  switch (VT.getSimpleVT().SimpleTy) {
+  switch (VT.SimpleTy) {
     // This is mostly going to be Neon/vector support.
   default: return false;
   case MVT::i1:
@@ -1127,11 +1123,11 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
   return true;
 }

-bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
+bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                                unsigned Alignment) {
   unsigned StrOpc;
   bool useAM3 = false;
-  switch (VT.getSimpleVT().SimpleTy) {
+  switch (VT.SimpleTy) {
     // This is mostly going to be Neon/vector support.
   default: return false;
   case MVT::i1: {
@@ -1405,8 +1401,7 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                              bool isZExt) {
   Type *Ty = Src1Value->getType();
-  EVT SrcVT = TLI.getValueType(Ty, true);
-  if (!SrcVT.isSimple()) return false;
+  MVT SrcVT = TLI.getSimpleValueType(Ty, true);

   bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
   if (isFloat && !Subtarget->hasVFP2())
@@ -1443,7 +1438,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
   unsigned CmpOpc;
   bool isICmp = true;
   bool needsExt = false;
-  switch (SrcVT.getSimpleVT().SimpleTy) {
+  switch (SrcVT.SimpleTy) {
   default: return false;
   // TODO: Verify compares.
   case MVT::f32:
@@ -1595,7 +1590,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
     return false;

   Value *Src = I->getOperand(0);
-  EVT SrcVT = TLI.getValueType(Src->getType(), true);
+  MVT SrcVT = TLI.getSimpleValueType(Src->getType(), true);
   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
     return false;

@@ -1604,7 +1599,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {

   // Handle sign-extension.
   if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
-    EVT DestVT = MVT::i32;
+    MVT DestVT = MVT::i32;
     SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                            /*isZExt*/!isSigned);
     if (SrcReg == 0) return false;
@@ -1811,7 +1806,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
 }

 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
-  EVT VT = TLI.getValueType(I->getType(), true);
+  MVT VT = TLI.getSimpleValueType(I->getType(), true);

   // We can get here in the case when we want to use NEON for our fp
   // operations, but can't figure out how to. Just use the vfp instructions
@@ -2055,7 +2050,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
   if (RVLocs.size() == 2 && RetVT == MVT::f64) {
     // For this move we copy into two registers and then move into the
     // double fp reg we want.
-    EVT DestVT = RVLocs[0].getValVT();
+    MVT DestVT = RVLocs[0].getValVT();
     const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
     unsigned ResultReg = createResultReg(DstRC);
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -2070,7 +2065,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
     UpdateValueMap(I, ResultReg);
   } else {
     assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
-    EVT CopyVT = RVLocs[0].getValVT();
+    MVT CopyVT = RVLocs[0].getValVT();

     // Special handling for extended integers.
     if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
@@ -2129,8 +2124,8 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
       return false;

     unsigned SrcReg = Reg + VA.getValNo();
-    EVT RVVT = TLI.getValueType(RV->getType());
-    EVT DestVT = VA.getValVT();
+    MVT RVVT = TLI.getSimpleValueType(RV->getType());
+    MVT DestVT = VA.getValVT();
     // Special handling for extended integers.
     if (RVVT != DestVT) {
       if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
@@ -2175,7 +2170,7 @@ unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
 unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
   GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                        GlobalValue::ExternalLinkage, 0, Name);
-  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
+  return ARMMaterializeGV(GV, TLI.getSimpleValueType(GV->getType()));
 }

 // A quick function that will emit a call for a named libcall in F with the
@@ -2587,7 +2582,7 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) {
   return true;
 }

-unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
+unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool isZExt) {
   if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
     return 0;
@@ -2595,8 +2590,7 @@ unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
   unsigned Opc;
   bool isBoolZext = false;
   const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
-  if (!SrcVT.isSimple()) return 0;
-  switch (SrcVT.getSimpleVT().SimpleTy) {
+  switch (SrcVT.SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
@@ -2643,9 +2637,9 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
   Value *Src = I->getOperand(0);
   Type *SrcTy = Src->getType();

-  EVT SrcVT, DestVT;
-  SrcVT = TLI.getValueType(SrcTy, true);
-  DestVT = TLI.getValueType(DestTy, true);
+  MVT SrcVT, DestVT;
+  SrcVT = TLI.getSimpleValueType(SrcTy, true);
+  DestVT = TLI.getSimpleValueType(DestTy, true);

   bool isZExt = isa<ZExtInst>(I);
   unsigned SrcReg = getRegForValue(Src);
@@ -2830,7 +2824,7 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
 }

 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
-                                     unsigned Align, EVT VT) {
+                                     unsigned Align, MVT VT) {
   bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
   ARMConstantPoolConstant *CPV =
     ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);