// Fragment of the FastISel Address helper: discriminates between a
// register base and a frame-index base for load/store addressing.
// NOTE(review): the enclosing class/struct header and the base/offset
// storage members are elided from this excerpt — confirm against the
// full file before editing.
89 enum BaseKind { RegBase, FrameIndexBase };
// Defaults to a register base address.
92 BaseKind Kind = RegBase;
104 void setKind(BaseKind K) { Kind =
K; }
105 BaseKind getKind()
const {
return Kind; }
106 bool isRegBase()
const {
return Kind == RegBase; }
107 bool isFIBase()
const {
return Kind == FrameIndexBase; }
// The register accessors (bodies elided here) may only be used while
// Kind == RegBase...
110 assert(isRegBase() &&
"Invalid base register access!");
115 assert(isRegBase() &&
"Invalid base register access!");
// ...and the frame-index accessors only while Kind == FrameIndexBase.
120 assert(isFIBase() &&
"Invalid base frame index access!");
125 assert(isFIBase() &&
"Invalid base frame index access!");
// Byte offset applied relative to the base (register or frame index).
129 void setOffset(
int O) { Offset =
O; }
// ARM implementation of the FastISel interface: fast, -O0 instruction
// selection for ARM/Thumb2. NOTE(review): many declaration lines are
// elided from this excerpt (gaps in the original line numbering).
133class ARMFastISel final :
public FastISel {
// Target/back-end objects cached from the MachineFunction's subtarget.
136 const ARMSubtarget *Subtarget;
138 const ARMBaseInstrInfo &TII;
139 const ARMTargetLowering &TLI;
140 const ARMBaseTargetMachine &TM;
141 ARMFunctionInfo *AFI;
145 LLVMContext *Context;
148 explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
149 const TargetLibraryInfo *libInfo,
150 const LibcallLoweringInfo *libcallLowering)
151 : FastISel(funcInfo, libInfo, libcallLowering),
152 Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
154 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()),
// Cache the per-function ARM info and whether we select Thumb code.
156 AFI = funcInfo.
MF->
getInfo<ARMFunctionInfo>();
157 isThumb2 = AFI->isThumbFunction();
// Code-emission helpers: build a machine instruction of the given
// opcode/register class from register and/or immediate operands.
164 Register fastEmitInst_r(
unsigned MachineInstOpcode,
165 const TargetRegisterClass *RC,
Register Op0);
166 Register fastEmitInst_rr(
unsigned MachineInstOpcode,
167 const TargetRegisterClass *RC,
Register Op0,
169 Register fastEmitInst_ri(
unsigned MachineInstOpcode,
170 const TargetRegisterClass *RC,
Register Op0,
172 Register fastEmitInst_i(
unsigned MachineInstOpcode,
173 const TargetRegisterClass *RC, uint64_t Imm);
// FastISel interface overrides implemented by this back-end.
177 bool fastSelectInstruction(
const Instruction *
I)
override;
178 Register fastMaterializeConstant(
const Constant *
C)
override;
179 Register fastMaterializeAlloca(
const AllocaInst *AI)
override;
180 bool tryToFoldLoadIntoMI(MachineInstr *
MI,
unsigned OpNo,
181 const LoadInst *LI)
override;
182 bool fastLowerArguments()
override;
// TableGen-erated fast-path selection routines.
184#include "ARMGenFastISel.inc"
// Per-IR-instruction selection routines.
188 bool SelectLoad(
const Instruction *
I);
189 bool SelectStore(
const Instruction *
I);
190 bool SelectBranch(
const Instruction *
I);
191 bool SelectIndirectBr(
const Instruction *
I);
192 bool SelectCmp(
const Instruction *
I);
193 bool SelectFPExt(
const Instruction *
I);
194 bool SelectFPTrunc(
const Instruction *
I);
195 bool SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode);
196 bool SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode);
197 bool SelectIToFP(
const Instruction *
I,
bool isSigned);
198 bool SelectFPToI(
const Instruction *
I,
bool isSigned);
199 bool SelectDiv(
const Instruction *
I,
bool isSigned);
200 bool SelectRem(
const Instruction *
I,
bool isSigned);
201 bool SelectCall(
const Instruction *
I,
const char *IntrMemName);
202 bool SelectIntrinsicCall(
const IntrinsicInst &
I);
203 bool SelectSelect(
const Instruction *
I);
204 bool SelectRet(
const Instruction *
I);
205 bool SelectTrunc(
const Instruction *
I);
206 bool SelectIntExt(
const Instruction *
I);
// Utility routines: type legality, addressing, materialization.
211 bool isPositionIndependent()
const;
212 bool isTypeLegal(
Type *Ty, MVT &VT);
213 bool isLoadTypeLegal(
Type *Ty, MVT &VT);
214 bool ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
217 MaybeAlign Alignment = std::nullopt,
bool isZExt =
true,
218 bool allocReg =
true);
220 MaybeAlign Alignment = std::nullopt);
221 bool ARMComputeAddress(
const Value *Obj,
Address &Addr);
222 void ARMSimplifyAddress(
Address &Addr, MVT VT,
bool useAM3);
223 bool ARMIsMemCpySmall(uint64_t Len);
224 bool ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
225 MaybeAlign Alignment);
226 Register ARMEmitIntExt(MVT SrcVT,
Register SrcReg, MVT DestVT,
bool isZExt);
227 Register ARMMaterializeFP(
const ConstantFP *CFP, MVT VT);
228 Register ARMMaterializeInt(
const Constant *
C, MVT VT);
229 Register ARMMaterializeGV(
const GlobalValue *GV, MVT VT);
232 unsigned ARMSelectCallOp(
bool UseReg);
233 Register ARMLowerPICELF(
const GlobalValue *GV, MVT VT);
235 const TargetLowering *getTargetLowering() {
return &TLI; }
// Call lowering helpers: argument marshaling and result handling.
239 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
242 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
243 SmallVectorImpl<Register> &ArgRegs,
244 SmallVectorImpl<MVT> &ArgVTs,
245 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
246 SmallVectorImpl<Register> &RegArgs,
250 Register getLibcallReg(
const Twine &Name);
251 bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
252 const Instruction *
I, CallingConv::ID CC,
253 unsigned &NumBytes,
bool isVarArg);
254 bool ARMEmitLibcall(
const Instruction *
I, RTLIB::Libcall
Call);
// Predicate/operand helpers for emitted machine instructions.
258 bool isARMNEONPred(
const MachineInstr *
MI);
259 bool DefinesOptionalPredicate(MachineInstr *
MI,
bool *CPSR);
260 const MachineInstrBuilder &AddOptionalDefs(
const MachineInstrBuilder &MIB);
261 void AddLoadStoreOperands(MVT VT,
Address &Addr,
262 const MachineInstrBuilder &MIB,
// Returns whether MI carries an optional def; scans its def operands
// looking for CPSR. NOTE(review): the branch bodies / return values
// are elided from this excerpt — presumably *CPSR is set when a CPSR
// def is found; confirm against the full file.
271bool ARMFastISel::DefinesOptionalPredicate(
MachineInstr *
MI,
bool *CPSR) {
272 if (!
MI->hasOptionalDef())
276 for (
const MachineOperand &MO :
MI->operands()) {
// Only register *definitions* are of interest here.
277 if (!MO.isReg() || !MO.isDef())
continue;
278 if (MO.getReg() == ARM::CPSR)
// Decides whether MI should be treated as NEON-predicated: consults
// the MCInstrDesc, falls back to MI->isPredicable() on one path, and
// otherwise checks the descriptor's operands for a predicate operand.
// NOTE(review): the guarding conditions are elided from this excerpt.
284bool ARMFastISel::isARMNEONPred(
const MachineInstr *
MI) {
285 const MCInstrDesc &MCID =
MI->getDesc();
290 return MI->isPredicable();
292 for (
const MCOperandInfo &opInfo : MCID.
operands())
293 if (opInfo.isPredicate())
// Appends the implicit predicate / optional-CPSR operands that ARM
// instructions expect onto a freshly built instruction, using the two
// helpers above. Returns the builder for chaining. NOTE(review): the
// operand-appending statements themselves are elided from this excerpt.
304const MachineInstrBuilder &
305ARMFastISel::AddOptionalDefs(
const MachineInstrBuilder &MIB) {
306 MachineInstr *
MI = &*MIB;
311 if (isARMNEONPred(
MI))
317 if (DefinesOptionalPredicate(
MI, &CPSR))
// Emits a one-register-operand instruction into the current block.
// If the opcode defines a result, build it directly into ResultReg;
// otherwise (elided else-path) build the instruction and COPY its
// result into ResultReg. All emissions go through AddOptionalDefs.
322Register ARMFastISel::fastEmitInst_r(
unsigned MachineInstOpcode,
323 const TargetRegisterClass *RC,
325 Register ResultReg = createResultReg(RC);
326 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
331 if (
II.getNumDefs() >= 1) {
332 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II,
335 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
337 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
338 TII.get(TargetOpcode::COPY), ResultReg)
// Two-register-operand variant of fastEmitInst_r; same def/COPY
// fallback structure (operand lists elided in this excerpt).
344Register ARMFastISel::fastEmitInst_rr(
unsigned MachineInstOpcode,
345 const TargetRegisterClass *RC,
347 Register ResultReg = createResultReg(RC);
348 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
355 if (
II.getNumDefs() >= 1) {
357 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II, ResultReg)
361 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
364 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
365 TII.get(TargetOpcode::COPY), ResultReg)
// Register+immediate variant of fastEmitInst_r; same def/COPY
// fallback structure (operand lists elided in this excerpt).
371Register ARMFastISel::fastEmitInst_ri(
unsigned MachineInstOpcode,
372 const TargetRegisterClass *RC,
374 Register ResultReg = createResultReg(RC);
375 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
380 if (
II.getNumDefs() >= 1) {
382 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II, ResultReg)
386 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
389 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
390 TII.get(TargetOpcode::COPY), ResultReg)
// Immediate-only variant of fastEmitInst_r; same def/COPY fallback
// structure (operand lists elided in this excerpt).
396Register ARMFastISel::fastEmitInst_i(
unsigned MachineInstOpcode,
397 const TargetRegisterClass *RC,
399 Register ResultReg = createResultReg(RC);
400 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
402 if (
II.getNumDefs() >= 1) {
403 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II,
406 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
408 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
409 TII.get(TargetOpcode::COPY), ResultReg)
// NOTE(review): orphan fragments — the enclosing function signatures
// are elided from this excerpt. The VMOVSR emission presumably belongs
// to a GPR->FP-register move helper and the VMOVRS emission to the
// FP->GPR counterpart; confirm against the full file.
422 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
423 TII.get(ARM::VMOVSR), MoveReg)
433 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
434 TII.get(ARM::VMOVRS), MoveReg)
// Materializes an FP constant into a register. Only f32/f64 are
// handled; requires VFP2 for the constant-pool path, which loads the
// value with VLDRD/VLDRS from a constant-pool entry. NOTE(review):
// the immediate-encoding fast path and return values are elided here.
442Register ARMFastISel::ARMMaterializeFP(
const ConstantFP *CFP, MVT VT) {
443 if (VT != MVT::f32 && VT != MVT::f64)
447 bool is64bit = VT == MVT::f64;
462 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
468 if (!Subtarget->hasVFP2Base())
return false;
// Slow path: spill the constant to the constant pool and load it.
472 unsigned Idx = MCP.getConstantPoolIndex(
cast<Constant>(CFP), Alignment);
474 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
478 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), DestReg)
// Materializes an integer constant (i1/i8/i16/i32). Tries, in order:
// MOVi16/t2MOVi16, MVN of the complement for negative values on
// V6T2+, movt-based materialization, and finally a constant-pool load
// (t2LDRpci for Thumb2, LDRcp for ARM). NOTE(review): several guard
// conditions and operand lists are elided from this excerpt.
484Register ARMFastISel::ARMMaterializeInt(
const Constant *
C, MVT VT) {
485 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
// 16-bit immediate move fast path.
492 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
493 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
495 Register ImmReg = createResultReg(RC);
496 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Negative i32 on V6T2+: materialize via MVN of the bitwise complement.
503 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->
isNegative()) {
508 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
509 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
511 Register ImmReg = createResultReg(RC);
512 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
520 if (Subtarget->useMovt())
// Fallback: load the constant from the constant pool.
531 Align Alignment =
DL.getPrefTypeAlign(
C->getType());
532 unsigned Idx = MCP.getConstantPoolIndex(
C, Alignment);
535 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
536 TII.get(ARM::t2LDRpci), ResultReg)
541 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
542 TII.get(ARM::LDRcp), ResultReg)
// Whether the current compilation is position independent.
// NOTE(review): the body is elided from this excerpt.
549bool ARMFastISel::isPositionIndependent()
const {
// Materializes the address of a global value. Bails on ROPI/RWPI and
// non-MachO thread-locals; prefers movt/MOVi32imm (PC-relative when
// PIC), delegates ELF+PIC to ARMLowerPICELF, and otherwise loads the
// address from the constant pool, adding a PIC fixup (PICLDR/PICADD)
// and a GOT/indirect load when required. NOTE(review): many operand
// lists and intermediate statements are elided from this excerpt.
553Register ARMFastISel::ARMMaterializeGV(
const GlobalValue *GV, MVT VT) {
559 if (Subtarget->isROPI() || Subtarget->isRWPI())
562 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
563 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
565 Register DestReg = createResultReg(RC);
570 if (!Subtarget->isTargetMachO() && IsThreadLocal)
573 bool IsPositionIndependent = isPositionIndependent();
// Fast path: movt-capable targets build the address with move pairs.
576 if (Subtarget->useMovt() &&
577 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
579 unsigned char TF = 0;
580 if (Subtarget->isTargetMachO())
583 if (IsPositionIndependent)
584 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
586 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
587 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
593 if (Subtarget->isTargetELF() && IsPositionIndependent)
594 return ARMLowerPICELF(GV, VT);
// PC adjustment for the PIC constant-pool entry (pipeline offset).
597 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
602 unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);
605 MachineInstrBuilder MIB;
607 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
608 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc),
610 if (IsPositionIndependent)
612 AddOptionalDefs(MIB);
616 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
617 TII.get(ARM::LDRcp), DestReg)
620 AddOptionalDefs(MIB);
// ARM-mode PIC needs an explicit PC-relative fixup instruction.
622 if (IsPositionIndependent) {
623 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
626 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
627 MIMD,
TII.get(
Opc), NewDestReg)
630 AddOptionalDefs(MIB);
// GOT-resident or MachO-indirect globals need one more load.
636 if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
637 (Subtarget->isTargetMachO() && IsIndirect)) {
638 MachineInstrBuilder MIB;
641 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
642 TII.get(ARM::t2LDRi12), NewDestReg)
646 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
647 TII.get(ARM::LDRi12), NewDestReg)
650 DestReg = NewDestReg;
651 AddOptionalDefs(MIB);
// FastISel hook: dispatches constant materialization by kind — FP
// constants, global values, then integers. NOTE(review): the dyn_cast
// guards for CFP/GV are elided from this excerpt.
657Register ARMFastISel::fastMaterializeConstant(
const Constant *
C) {
666 return ARMMaterializeFP(CFP, VT);
668 return ARMMaterializeGV(GV, VT);
670 return ARMMaterializeInt(
C, VT);
// FastISel hook: materializes the address of a static alloca with an
// ADDri off the frame index. NOTE(review): the map is consulted twice
// (count() then find()) — looks redundant but behavior-preserving;
// confirm against the full file before simplifying.
677Register ARMFastISel::fastMaterializeAlloca(
const AllocaInst *AI) {
679 if (!FuncInfo.StaticAllocaMap.count(AI))
683 if (!isLoadTypeLegal(AI->
getType(), VT))
686 auto SI = FuncInfo.StaticAllocaMap.find(AI);
690 if (SI != FuncInfo.StaticAllocaMap.end()) {
691 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
693 Register ResultReg = createResultReg(RC);
696 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Returns whether Ty maps to a simple, legal MVT (stored into VT).
// Rejects MVT::Other and non-simple EVTs outright.
706bool ARMFastISel::isTypeLegal(
Type *Ty, MVT &VT) {
710 if (evt == MVT::Other || !evt.
isSimple())
return false;
// Like isTypeLegal, but additionally accepts the narrow integer types
// (i1/i8/i16) that loads can extend from.
718bool ARMFastISel::isLoadTypeLegal(
Type *Ty, MVT &VT) {
719 if (isTypeLegal(Ty, VT))
return true;
723 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
// Computes an Address for Obj by walking through address-preserving
// operations (bitcast, inttoptr, ptrtoint, GEP, alloca), folding
// constant GEP offsets into Addr, and finally falling back to putting
// the value in a register. NOTE(review): several guards and the GEP
// operand walk are partially elided from this excerpt.
730bool ARMFastISel::ARMComputeAddress(
const Value *Obj,
Address &Addr) {
732 const User *
U =
nullptr;
733 unsigned Opcode = Instruction::UserOp1;
// Only look through an instruction if it is in the current block (or
// is a static alloca); otherwise its value is not available here.
737 if (FuncInfo.StaticAllocaMap.count(
static_cast<const AllocaInst *
>(Obj)) ||
738 FuncInfo.getMBB(
I->getParent()) == FuncInfo.MBB) {
739 Opcode =
I->getOpcode();
743 Opcode =
C->getOpcode();
// Address spaces above 255 are not supported by this fast path.
748 if (Ty->getAddressSpace() > 255)
756 case Instruction::BitCast:
758 return ARMComputeAddress(
U->getOperand(0), Addr);
759 case Instruction::IntToPtr:
763 return ARMComputeAddress(
U->getOperand(0), Addr);
765 case Instruction::PtrToInt:
768 return ARMComputeAddress(
U->getOperand(0), Addr);
770 case Instruction::GetElementPtr: {
// Accumulate the constant part of the GEP offset in TmpOffset.
772 int TmpOffset = Addr.getOffset();
778 i != e; ++i, ++GTI) {
781 const StructLayout *SL =
DL.getStructLayout(STy);
792 if (canFoldAddIntoGEP(U,
Op)) {
802 goto unsupported_gep;
808 Addr.setOffset(TmpOffset);
809 if (ARMComputeAddress(
U->getOperand(0), Addr))
return true;
817 case Instruction::Alloca: {
// Static allocas become frame-index-based addresses.
819 auto SI = FuncInfo.StaticAllocaMap.find(AI);
820 if (SI != FuncInfo.StaticAllocaMap.end()) {
821 Addr.setKind(Address::FrameIndexBase);
822 Addr.setFI(
SI->second);
// Fallback: materialize Obj into a register base.
831 Addr.setReg(getRegForValue(Obj));
832 return Addr.getReg();
// Rewrites Addr into a form the chosen load/store encoding can accept:
// checks the offset against the encoding's immediate range (12-bit,
// AM3's 8-bit, etc.), converts an out-of-range frame-index base into a
// register via ADDri, and folds any remaining offset into the base
// register with an ADD. NOTE(review): the per-VT switch arms are
// partially elided from this excerpt.
835void ARMFastISel::ARMSimplifyAddress(
Address &Addr, MVT VT,
bool useAM3) {
836 bool needsLowering =
false;
// 12-bit unsigned immediate range check.
845 needsLowering = ((Addr.getOffset() & 0xfff) != Addr.getOffset());
// Thumb2 with V6T2 tolerates small negative offsets (i8 form).
847 if (needsLowering && isThumb2)
848 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.getOffset() < 0 &&
849 Addr.getOffset() > -256);
852 needsLowering = (Addr.getOffset() > 255 || Addr.getOffset() < -255);
858 needsLowering = ((Addr.getOffset() & 0xff) != Addr.getOffset());
// Frame-index bases that need lowering are turned into a plain
// register base with an explicit ADDri.
865 if (needsLowering && Addr.isFIBase()) {
866 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
868 Register ResultReg = createResultReg(RC);
869 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
871 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), ResultReg)
874 Addr.setKind(Address::RegBase);
875 Addr.setReg(ResultReg);
// Residual offset is added into the base register.
881 Addr.setReg(fastEmit_ri_(MVT::i32,
ISD::ADD, Addr.getReg(),
882 Addr.getOffset(), MVT::i32));
// Appends the address operands (frame index or base register, plus an
// encoded immediate) and a MachineMemOperand to MIB, then the optional
// predicate defs. VLDR/VSTR-style offsets are scaled by 4 (elided
// guard). AM3 encodes negative offsets with the 0x100 sign bit.
887void ARMFastISel::AddLoadStoreOperands(MVT VT,
Address &Addr,
888 const MachineInstrBuilder &MIB,
894 Addr.setOffset(Addr.getOffset() / 4);
// Frame-index base: attach the FI plus a memory operand describing it.
897 if (Addr.isFIBase()) {
898 int FI = Addr.getFI();
899 int Offset = Addr.getOffset();
900 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
902 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
// AM3 immediate: magnitude in the low byte, 0x100 flags "subtract".
909 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())
914 MIB.
addImm(Addr.getOffset());
// Register base path.
919 MIB.
addReg(Addr.getReg());
924 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())
929 MIB.
addImm(Addr.getOffset());
932 AddOptionalDefs(MIB);
// Emits a load of type VT from Addr into ResultReg. Picks the opcode
// and register class per type/subtarget (negative offsets use the i8
// forms on V6T2+), rejects under-aligned halfword/word accesses when
// unaligned memory is disallowed, requires VFP2 for FP loads (an
// under-aligned f32 falls back to an integer load + VMOVSR), then
// simplifies the address and builds the instruction. NOTE(review):
// several case labels and ARM-mode opcode arms are elided here.
935bool ARMFastISel::ARMEmitLoad(MVT VT,
Register &ResultReg,
Address &Addr,
936 MaybeAlign Alignment,
bool isZExt,
940 bool needVMOV =
false;
941 const TargetRegisterClass *RC;
944 default:
return false;
// i8: choose zero- vs sign-extending byte load.
948 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
949 Subtarget->hasV6T2Ops())
950 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
952 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
961 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// i16: reject under-aligned access when unaligned mem is disallowed.
964 if (Alignment && *Alignment <
Align(2) &&
965 !Subtarget->allowsUnalignedMem())
969 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
970 Subtarget->hasV6T2Ops())
971 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
973 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
975 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
978 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// i32.
981 if (Alignment && *Alignment <
Align(4) &&
982 !Subtarget->allowsUnalignedMem())
986 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
987 Subtarget->hasV6T2Ops())
994 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// f32: under-aligned loads go through an integer load + VMOV.
997 if (!Subtarget->hasVFP2Base())
return false;
999 if (Alignment && *Alignment <
Align(4)) {
1002 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
1003 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// f64: requires VFP2 and natural alignment.
1011 if (!Subtarget->hasVFP2Base())
return false;
1014 if (Alignment && *Alignment <
Align(4))
1022 ARMSimplifyAddress(Addr, VT, useAM3);
1026 ResultReg = createResultReg(RC);
1027 assert(ResultReg.
isVirtual() &&
"Expected an allocated virtual register.");
1028 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1029 TII.get(
Opc), ResultReg);
// Move the integer bits into an FP register when needVMOV was set.
1036 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1037 TII.get(ARM::VMOVSR), MoveReg)
// Selects an IR load: bails on swifterror pointers, checks the loaded
// type is legal, computes the address, emits the load (elided call),
// and records the result register for the IR value.
1044bool ARMFastISel::SelectLoad(
const Instruction *
I) {
1049 const Value *SV =
I->getOperand(0);
// swifterror values are handled by a dedicated lowering, not here.
1054 if (Arg->hasSwiftErrorAttr())
1059 if (Alloca->isSwiftError())
1066 if (!isLoadTypeLegal(
I->getType(), VT))
1071 if (!ARMComputeAddress(
I->getOperand(0), Addr))
return false;
1076 updateValueMap(
I, ResultReg);
// Emits a store of SrcReg to Addr (function signature partially elided
// from this excerpt). i1 stores mask the source with AND #1 first;
// narrow/word stores pick the i8-offset forms for small negative
// offsets on V6T2+; under-aligned f32 stores go through VMOVRS to an
// integer store; f64 requires VFP2 and alignment. The address is then
// simplified and the store instruction built.
1081 MaybeAlign Alignment) {
1083 bool useAM3 =
false;
1086 default:
return false;
// i1: normalize to 0/1 with an AND before storing as a byte.
1088 Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1089 : &ARM::GPRRegClass);
1090 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1092 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1100 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1101 Subtarget->hasV6T2Ops())
1102 StrOpc = ARM::t2STRBi8;
1104 StrOpc = ARM::t2STRBi12;
1106 StrOpc = ARM::STRBi12;
// i16: reject under-aligned stores when unaligned mem is disallowed.
1110 if (Alignment && *Alignment <
Align(2) &&
1111 !Subtarget->allowsUnalignedMem())
1115 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1116 Subtarget->hasV6T2Ops())
1117 StrOpc = ARM::t2STRHi8;
1119 StrOpc = ARM::t2STRHi12;
// i32.
1126 if (Alignment && *Alignment <
Align(4) &&
1127 !Subtarget->allowsUnalignedMem())
1131 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1132 Subtarget->hasV6T2Ops())
1133 StrOpc = ARM::t2STRi8;
1135 StrOpc = ARM::t2STRi12;
1137 StrOpc = ARM::STRi12;
// f32: under-aligned stores move the bits to a GPR first.
1141 if (!Subtarget->hasVFP2Base())
return false;
1143 if (Alignment && *Alignment <
Align(4)) {
1145 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1146 TII.get(ARM::VMOVRS), MoveReg)
1150 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1152 StrOpc = ARM::VSTRS;
// f64.
1157 if (!Subtarget->hasVFP2Base())
return false;
1160 if (Alignment && *Alignment <
Align(4))
1163 StrOpc = ARM::VSTRD;
1167 ARMSimplifyAddress(Addr, VT, useAM3);
1171 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Selects an IR store: bails on swifterror destinations, checks the
// stored type, materializes the value register, computes the address,
// and emits the store (tail elided from this excerpt).
1178bool ARMFastISel::SelectStore(
const Instruction *
I) {
1179 Value *Op0 =
I->getOperand(0);
1186 const Value *PtrV =
I->getOperand(1);
// swifterror pointers are handled by dedicated lowering, not here.
1191 if (Arg->hasSwiftErrorAttr())
1196 if (Alloca->isSwiftError())
1203 if (!isLoadTypeLegal(
I->getOperand(0)->getType(), VT))
1207 SrcReg = getRegForValue(Op0);
1213 if (!ARMComputeAddress(
I->getOperand(1), Addr))
// Selects a conditional/unconditional branch. Fast paths: fold a
// single-use icmp/fcmp in the same block into a compare + Bcc; fold a
// single-use trunc-to-i1 via TST #1 + Bcc; fold a constant condition
// into an unconditional branch. Fallback: TST the condition register
// and branch. Each conditional path may invert the condition when the
// true block is the layout successor (bodies elided in this excerpt).
1268bool ARMFastISel::SelectBranch(
const Instruction *
I) {
1271 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->
getSuccessor(1));
// Path 1: condition is a same-block, single-use compare.
1278 if (CI->
hasOneUse() && (CI->getParent() ==
I->getParent())) {
1282 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1296 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1297 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1299 finishCondBranch(BI->getParent(),
TBB, FBB);
// Path 2: condition is a same-block, single-use truncate — test bit 0.
1304 if (TI->hasOneUse() && TI->getParent() ==
I->getParent() &&
1305 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1306 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1307 Register OpReg = getRegForValue(TI->getOperand(0));
1309 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1314 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1319 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1320 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1323 finishCondBranch(BI->getParent(),
TBB, FBB);
// Path 3: constant condition — emit a direct branch.
1326 }
else if (
const ConstantInt *CI =
1330 fastEmitBranch(Target, MIMD.getDL());
// Fallback: test the materialized condition register against #1.
1345 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1348 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1353 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1358 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1359 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1361 finishCondBranch(BI->getParent(),
TBB, FBB);
// Selects an indirectbr: branch to the address register via tBRIND
// (Thumb2) or BX (ARM, requires V4T), then record all IR successors
// on the machine basic block for the CFG.
1365bool ARMFastISel::SelectIndirectBr(
const Instruction *
I) {
1366 Register AddrReg = getRegForValue(
I->getOperand(0));
1370 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
// BX needs ARMv4T or later.
1371 assert(isThumb2 || Subtarget->hasV4TOps());
1373 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1377 for (
const BasicBlock *SuccBB :
IB->successors())
1378 FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));
// Emits a compare of Src1Value against Src2Value. FP compares need
// VFP2 (and FP64 for doubles) and end with FMSTAT to move the flags;
// integer compares pick CMP/CMN (register or immediate) per Thumb2/ARM
// mode, extending narrow operands to i32 first. Immediate operands are
// folded when encodable; negative immediates use CMN with the negated
// value. NOTE(review): several switch arms and the immediate-encoding
// checks are elided from this excerpt.
1383bool ARMFastISel::ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
1387 if (!SrcEVT.
isSimple())
return false;
1390 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1393 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1399 bool UseImm =
false;
1400 bool isNegativeImm =
false;
// Try to fold a constant RHS into the compare's immediate field.
1404 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1406 const APInt &CIVal = ConstInt->getValue();
// INT_MIN cannot be negated, so it is excluded from the CMN path.
1411 if (Imm < 0 && Imm != (
int)0x80000000) {
1412 isNegativeImm =
true;
// FP: only +0.0 can be compared as an immediate (VCMPZ).
1419 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1420 if (ConstFP->isZero() && !ConstFP->isNegative())
1426 bool needsExt =
false;
1428 default:
return false;
1432 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1436 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
1446 CmpOpc = ARM::t2CMPrr;
1448 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1451 CmpOpc = ARM::CMPrr;
1453 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1458 Register SrcReg1 = getRegForValue(Src1Value);
1464 SrcReg2 = getRegForValue(Src2Value);
// Extend sub-i32 operands before comparing.
1471 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1475 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1481 const MCInstrDesc &
II =
TII.get(CmpOpc);
1485 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
1488 MachineInstrBuilder MIB;
1489 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
1495 AddOptionalDefs(MIB);
// FP compares leave the result in FPSCR; FMSTAT copies it to CPSR.
1501 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1502 TII.get(ARM::FMSTAT)));
// Selects an IR icmp/fcmp: emit the compare (elided), then materialize
// the boolean result with a predicated MOVCCi over a zero register.
1506bool ARMFastISel::SelectCmp(
const Instruction *
I) {
1521 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1522 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
1523 : &ARM::GPRRegClass;
1524 Register DestReg = createResultReg(RC);
1526 Register ZeroReg = fastMaterializeConstant(Zero);
// Conditionally overwrite the zero with 1 based on the compare flags.
1528 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc), DestReg)
1532 updateValueMap(
I, DestReg);
// Selects fpext float->double via VCVTDS; requires VFP2 with FP64.
1536bool ARMFastISel::SelectFPExt(
const Instruction *
I) {
1538 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
// Only the exact float -> double extension is handled.
1541 if (!
I->getType()->isDoubleTy() ||
1542 !
V->getType()->isFloatTy())
return false;
1549 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1550 TII.get(ARM::VCVTDS), Result)
1552 updateValueMap(
I, Result);
// Selects fptrunc double->float via VCVTSD; requires VFP2 with FP64.
1556bool ARMFastISel::SelectFPTrunc(
const Instruction *
I) {
1558 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
// Only the exact double -> float truncation is handled.
1561 if (!(
I->getType()->isFloatTy() &&
1562 V->getType()->isDoubleTy()))
return false;
1569 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1570 TII.get(ARM::VCVTSD), Result)
1572 updateValueMap(
I, Result);
// Selects sitofp/uitofp: requires VFP2; extends i8/i16 sources to i32,
// moves the integer into an FP register, then converts with the
// signed/unsigned VCVT opcode selected for f32 or (with FP64) f64.
// NOTE(review): the opcode-selection lines are partially elided.
1576bool ARMFastISel::SelectIToFP(
const Instruction *
I,
bool isSigned) {
1578 if (!Subtarget->hasVFP2Base())
return false;
1581 Type *Ty =
I->getType();
1582 if (!isTypeLegal(Ty, DstVT))
1585 Value *Src =
I->getOperand(0);
1590 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1593 Register SrcReg = getRegForValue(Src);
// Widen narrow integers to i32 before the convert.
1598 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1599 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
// The integer bits must be in an S register for VCVT.
1607 Register FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1613 else if (Ty->
isDoubleTy() && Subtarget->hasFP64())
1618 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1620 updateValueMap(
I, ResultReg);
// Selects fptosi/fptoui: requires VFP2; converts with the VCVT opcode
// chosen for the source FP type (f64 needs FP64), then moves the
// result from the FP register back to an integer register.
// NOTE(review): the opcode-selection lines are partially elided.
1624bool ARMFastISel::SelectFPToI(
const Instruction *
I,
bool isSigned) {
1626 if (!Subtarget->hasVFP2Base())
return false;
1629 Type *RetTy =
I->getType();
1630 if (!isTypeLegal(RetTy, DstVT))
1638 Type *OpTy =
I->getOperand(0)->getType();
1640 else if (OpTy->
isDoubleTy() && Subtarget->hasFP64())
1646 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// VCVT leaves the integer result in an S register; copy it out.
1651 Register IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1655 updateValueMap(
I, IntReg);
// Selects an i32 select: TST the condition against #1, copy the false
// value into the result, then conditionally overwrite with the true
// value using MOVCCr, or MOVCCi/MVNCCi when the true operand is an
// encodable (possibly negated) immediate. NOTE(review): operand lists
// and some guards are elided from this excerpt.
1659bool ARMFastISel::SelectSelect(
const Instruction *
I) {
1661 if (!isTypeLegal(
I->getType(), VT))
// Only 32-bit integer selects are handled by this fast path.
1665 if (VT != MVT::i32)
return false;
1667 Register CondReg = getRegForValue(
I->getOperand(0));
1670 Register Op1Reg = getRegForValue(
I->getOperand(1));
1676 bool UseImm =
false;
1677 bool isNegativeImm =
false;
1679 assert(VT == MVT::i32 &&
"Expecting an i32.");
// Try to fold a constant operand-2 into the conditional move.
1680 Imm = (int)ConstInt->getValue().getZExtValue();
1682 isNegativeImm =
true;
1691 Op2Reg = getRegForValue(
I->getOperand(2));
// Set CPSR from bit 0 of the i1 condition.
1696 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1699 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1704 const TargetRegisterClass *RC;
1706 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1707 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1709 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1711 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
// Negative immediates are materialized via the MVN conditional form.
1713 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1715 Register ResultReg = createResultReg(RC);
1719 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1727 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1734 updateValueMap(
I, ResultReg);
// Selects sdiv/udiv. Targets with hardware divide bail to the DAG path
// (elided); otherwise the division is lowered to the width-matched
// RTLIB SDIV_*/UDIV_* libcall.
1738bool ARMFastISel::SelectDiv(
const Instruction *
I,
bool isSigned) {
1740 Type *Ty =
I->getType();
1741 if (!isTypeLegal(Ty, VT))
// Prefer the hardware divider when the subtarget has one.
1747 if (Subtarget->hasDivideInThumbMode())
1751 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1753 LC =
isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1754 else if (VT == MVT::i16)
1755 LC =
isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1756 else if (VT == MVT::i32)
1757 LC =
isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1758 else if (VT == MVT::i64)
1759 LC =
isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1760 else if (VT == MVT::i128)
1761 LC =
isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1762 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SDIV!");
1764 return ARMEmitLibcall(
I, LC);
// Selects srem/urem by lowering to the width-matched RTLIB
// SREM_*/UREM_* libcall (mirrors SelectDiv above).
1767bool ARMFastISel::SelectRem(
const Instruction *
I,
bool isSigned) {
1769 Type *Ty =
I->getType();
1770 if (!isTypeLegal(Ty, VT))
1780 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1782 LC =
isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1783 else if (VT == MVT::i16)
1784 LC =
isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1785 else if (VT == MVT::i32)
1786 LC =
isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1787 else if (VT == MVT::i64)
1788 LC =
isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1789 else if (VT == MVT::i128)
1790 LC =
isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1791 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SREM!");
1793 return ARMEmitLibcall(
I, LC);
// Selects a narrow integer add/or/sub (the i16/i8/i1 guard below only
// lets the narrow types through this path), mapping the ISD opcode to
// the Thumb2/ARM rr instruction and emitting it on both operands.
1796bool ARMFastISel::SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode) {
1801 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1805 switch (ISDOpcode) {
1806 default:
return false;
1808 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1811 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1814 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1818 Register SrcReg1 = getRegForValue(
I->getOperand(0));
1824 Register SrcReg2 = getRegForValue(
I->getOperand(1));
1828 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1831 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1834 updateValueMap(
I, ResultReg);
// Selects an FP add/sub/mul: checks VFP2 (and FP64 for doubles), maps
// the ISD opcode to the VADD/VSUB/VMUL single- or double-precision
// instruction, and emits it on both operand registers.
1838bool ARMFastISel::SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode) {
1840 if (!FPVT.
isSimple())
return false;
1851 Type *Ty =
I->getType();
1852 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1854 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
// i64 here covers the two-GPR soft-float representation of f64.
1858 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1859 switch (ISDOpcode) {
1860 default:
return false;
1862 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1865 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1868 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1871 Register Op1 = getRegForValue(
I->getOperand(0));
1875 Register Op2 = getRegForValue(
I->getOperand(1));
1880 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1883 updateValueMap(
I, ResultReg);
// Maps a calling convention (plus return/vararg context, elided from
// the visible signature) to the CCAssignFn used to assign argument or
// return locations. NOTE(review): the returned functions for each case
// are elided from this excerpt.
1891CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1897 case CallingConv::Fast:
// Fast CC can use FP registers only for non-vararg calls.
1898 if (Subtarget->hasFPRegs() && !isVarArg) {
1905 case CallingConv::C:
1906 case CallingConv::CXX_FAST_TLS:
1909 if (Subtarget->hasFPRegs() &&
1917 case CallingConv::ARM_AAPCS_VFP:
1918 case CallingConv::Swift:
1919 case CallingConv::SwiftTail:
1925 case CallingConv::ARM_AAPCS:
1927 case CallingConv::ARM_APCS:
1929 case CallingConv::GHC:
1934 case CallingConv::CFGuard_Check:
// Lowers call arguments: runs the calling-convention analysis, rejects
// argument classes this fast path cannot handle (vector args, f64/f32
// without VFP2 on some paths), emits the ADJCALLSTACKDOWN, then walks
// the assigned locations — extending/bitcasting values as required,
// copying register args (f64 splits across two GPRs via VMOVRRD-style
// custom handling), and storing stack args relative to SP.
// NOTE(review): many statements are elided from this excerpt.
1939bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1940 SmallVectorImpl<Register> &ArgRegs,
1941 SmallVectorImpl<MVT> &ArgVTs,
1942 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1943 SmallVectorImpl<Register> &RegArgs,
1949 for (
Value *Arg : Args)
1951 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *
Context);
1952 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, OrigTys,
1953 CCAssignFnForCall(CC,
false, isVarArg));
// First pass: verify every assignment is one we can lower.
1957 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
1958 CCValAssign &VA = ArgLocs[i];
1972 !VA.
isRegLoc() || !ArgLocs[++i].isRegLoc())
1984 if (!Subtarget->hasVFP2Base())
1988 if (!Subtarget->hasVFP2Base())
1998 NumBytes = CCInfo.getStackSize();
// Reserve the outgoing-argument stack area.
2001 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
2002 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2003 TII.get(AdjStackDown))
// Second pass: actually place each argument.
2007 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
2008 CCValAssign &VA = ArgLocs[i];
2014 "We don't handle NEON/vector parameters yet.");
2021 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
false);
2022 assert(Arg &&
"Failed to emit a sext");
2030 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
true);
2031 assert(Arg &&
"Failed to emit a zext");
2037 assert(BC &&
"Failed to emit a bitcast!");
2047 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2053 "Custom lowering for v2f64 args not available");
// Custom f64 lowering consumes the paired location as well.
2056 CCValAssign &NextVA = ArgLocs[++i];
2059 "We only handle register args!");
2061 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Stack argument: store relative to SP at the assigned offset.
2076 Addr.setKind(Address::RegBase);
2077 Addr.setReg(ARM::SP);
2080 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2081 assert(EmitRet &&
"Could not emit a store for argument!");
// Finishes a call: emits ADJCALLSTACKUP, then copies the return value
// out of the convention-assigned registers — an f64 split across two
// GPRs is rebuilt with VMOVDRR, otherwise a single COPY is used — and
// records the used physical registers and the result mapping.
2088bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
2089 const Instruction *
I, CallingConv::ID CC,
2090 unsigned &NumBytes,
bool isVarArg) {
// Release the outgoing-argument stack area reserved in ProcessCallArgs.
2092 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
2093 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2094 TII.get(AdjStackUp))
2098 if (RetVT != MVT::isVoid) {
2100 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *
Context);
2101 CCInfo.AnalyzeCallResult(RetVT,
I->getType(),
2102 CCAssignFnForCall(CC,
true, isVarArg));
// f64 returned in two GPRs: recombine with VMOVDRR.
2105 if (RVLocs.
size() == 2 && RetVT == MVT::f64) {
2108 MVT DestVT = RVLocs[0].getValVT();
2110 Register ResultReg = createResultReg(DstRC);
2111 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2112 TII.get(ARM::VMOVDRR), ResultReg)
2113 .
addReg(RVLocs[0].getLocReg())
2114 .
addReg(RVLocs[1].getLocReg()));
2116 UsedRegs.
push_back(RVLocs[0].getLocReg());
2117 UsedRegs.
push_back(RVLocs[1].getLocReg());
2120 updateValueMap(
I, ResultReg);
2122 assert(RVLocs.
size() == 1 &&
"Can't handle non-double multi-reg retvals!");
2123 MVT CopyVT = RVLocs[0].getValVT();
// Narrow integer results are returned widened to i32.
2126 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2131 Register ResultReg = createResultReg(DstRC);
2132 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2133 TII.get(TargetOpcode::COPY),
2134 ResultReg).
addReg(RVLocs[0].getLocReg());
2135 UsedRegs.
push_back(RVLocs[0].getLocReg());
2138 updateValueMap(
I, ResultReg);
// Selects a return: checks lowerability and swifterror, analyzes the
// return locations, extends a narrow return value per the zext/sext
// flags, copies it into the assigned physical register, and emits the
// return instruction (tBXNS_RET for CMSE non-secure entries), marking
// the return registers as implicit uses. NOTE(review): several guards
// and statements are elided from this excerpt.
2145bool ARMFastISel::SelectRet(
const Instruction *
I) {
2147 const Function &
F = *
I->getParent()->getParent();
2148 const bool IsCmseNSEntry =
F.hasFnAttribute(
"cmse_nonsecure_entry");
2150 if (!FuncInfo.CanLowerReturn)
// swifterror returns are handled elsewhere.
2154 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2163 CallingConv::ID CC =
F.getCallingConv();
2170 CCState CCInfo(CC,
F.isVarArg(), *FuncInfo.MF, ValLocs,
I->getContext());
2171 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC,
true ,
// Only single-location returns are handled by this fast path.
2180 if (ValLocs.
size() != 1)
2183 CCValAssign &VA = ValLocs[0];
2194 if (!RVEVT.
isSimple())
return false;
2198 if (RVVT != DestVT) {
2199 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2202 assert(DestVT == MVT::i32 &&
"ARM should always ext to i32");
// Honor the signedness recorded in the return's argument flags.
2206 if (Outs[0].
Flags.isZExt() || Outs[0].Flags.isSExt()) {
2207 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].
Flags.isZExt());
2215 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2219 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2220 TII.get(TargetOpcode::COPY), DstReg).
addReg(SrcReg);
// CMSE non-secure entry functions must return via BXNS.
2229 RetOpc = ARM::tBXNS_RET;
2233 RetOpc = Subtarget->getReturnOpcode();
2235 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2237 AddOptionalDefs(MIB);
// Keep the copied return registers alive across the return.
2239 MIB.
addReg(R, RegState::Implicit);
2243unsigned ARMFastISel::ARMSelectCallOp(
bool UseReg) {
2247 return isThumb2 ? ARM::tBL : ARM::BL;
2250Register ARMFastISel::getLibcallReg(
const Twine &Name) {
2257 GlobalValue *GV =
M.getNamedGlobal(
Name.str());
2259 GV =
new GlobalVariable(M, Type::getInt32Ty(*
Context),
false,
2262 return ARMMaterializeGV(GV, LCREVT.
getSimpleVT());
2272bool ARMFastISel::ARMEmitLibcall(
const Instruction *
I, RTLIB::Libcall
Call) {
2273 RTLIB::LibcallImpl LCImpl = LibcallLowering->getLibcallImpl(
Call);
2274 if (LCImpl == RTLIB::Unsupported)
2278 Type *RetTy =
I->getType();
2281 RetVT = MVT::isVoid;
2282 else if (!isTypeLegal(RetTy, RetVT))
2285 CallingConv::ID CC = LibcallLowering->getLibcallImplCallingConv(LCImpl);
2288 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2290 CCState CCInfo(CC,
false, *FuncInfo.MF, RVLocs, *
Context);
2291 CCInfo.AnalyzeCallResult(RetVT, RetTy, CCAssignFnForCall(CC,
true,
false));
2292 if (RVLocs.
size() >= 2 && RetVT != MVT::f64)
2297 SmallVector<Value*, 8>
Args;
2301 Args.reserve(
I->getNumOperands());
2302 ArgRegs.
reserve(
I->getNumOperands());
2303 ArgVTs.
reserve(
I->getNumOperands());
2304 ArgFlags.
reserve(
I->getNumOperands());
2310 Type *ArgTy =
Op->getType();
2312 if (!isTypeLegal(ArgTy, ArgVT))
return false;
2314 ISD::ArgFlagsTy
Flags;
2315 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2326 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2327 RegArgs, CC, NumBytes,
false))
2333 if (Subtarget->genLongCalls()) {
2334 CalleeReg = getLibcallReg(FuncName);
2340 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2341 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2342 MIMD,
TII.get(CallOpc));
2346 if (Subtarget->genLongCalls()) {
2355 MIB.
addReg(R, RegState::Implicit);
2363 if (!FinishCall(RetVT, UsedRegs,
I, CC, NumBytes,
false))
return false;
2366 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2371bool ARMFastISel::SelectCall(
const Instruction *
I,
2372 const char *IntrMemName =
nullptr) {
2388 bool isVarArg = FTy->isVarArg();
2391 Type *RetTy =
I->getType();
2394 RetVT = MVT::isVoid;
2395 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2396 RetVT != MVT::i8 && RetVT != MVT::i1)
2400 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2401 RetVT != MVT::i16 && RetVT != MVT::i32) {
2403 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *
Context);
2404 CCInfo.AnalyzeCallResult(RetVT, RetTy,
2405 CCAssignFnForCall(CC,
true, isVarArg));
2406 if (RVLocs.
size() >= 2 && RetVT != MVT::f64)
2411 SmallVector<Value*, 8>
Args;
2415 unsigned arg_size = CI->
arg_size();
2416 Args.reserve(arg_size);
2420 for (
auto ArgI = CI->
arg_begin(), ArgE = CI->
arg_end(); ArgI != ArgE; ++ArgI) {
2423 if (IntrMemName && ArgE - ArgI <= 1)
2426 ISD::ArgFlagsTy
Flags;
2427 unsigned ArgIdx = ArgI - CI->
arg_begin();
2442 Type *ArgTy = (*ArgI)->getType();
2444 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2448 Register Arg = getRegForValue(*ArgI);
2452 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2454 Args.push_back(*ArgI);
2463 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2464 RegArgs, CC, NumBytes, isVarArg))
2469 if (!GV || Subtarget->genLongCalls())
UseReg =
true;
2474 CalleeReg = getLibcallReg(IntrMemName);
2476 CalleeReg = getRegForValue(Callee);
2483 unsigned CallOpc = ARMSelectCallOp(
UseReg);
2484 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2485 MIMD,
TII.get(CallOpc));
2494 }
else if (!IntrMemName)
2501 MIB.
addReg(R, RegState::Implicit);
2509 if (!FinishCall(RetVT, UsedRegs,
I, CC, NumBytes, isVarArg))
2513 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2519bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2523bool ARMFastISel::ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
2524 MaybeAlign Alignment) {
2526 if (!ARMIsMemCpySmall(Len))
2531 if (!Alignment || *Alignment >= 4) {
2537 assert(Len == 1 &&
"Expected a length of 1!");
2541 assert(Alignment &&
"Alignment is set in this branch");
2543 if (Len >= 2 && *Alignment == 2)
2552 RV = ARMEmitLoad(VT, ResultReg, Src);
2553 assert(RV &&
"Should be able to handle this load.");
2554 RV = ARMEmitStore(VT, ResultReg, Dest);
2555 assert(RV &&
"Should be able to handle this store.");
2560 Dest.setOffset(Dest.getOffset() +
Size);
2561 Src.setOffset(Src.getOffset() +
Size);
2567bool ARMFastISel::SelectIntrinsicCall(
const IntrinsicInst &
I) {
2569 switch (
I.getIntrinsicID()) {
2570 default:
return false;
2571 case Intrinsic::frameaddress: {
2572 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
2575 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2576 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
2577 : &ARM::GPRRegClass;
2579 const ARMBaseRegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2591 DestReg = createResultReg(RC);
2592 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2593 TII.get(LdrOpc), DestReg)
2597 updateValueMap(&
I, SrcReg);
2600 case Intrinsic::memcpy:
2601 case Intrinsic::memmove: {
2609 bool isMemCpy = (
I.getIntrinsicID() == Intrinsic::memcpy);
2614 if (ARMIsMemCpySmall(Len)) {
2616 if (!ARMComputeAddress(MTI.
getRawDest(), Dest) ||
2619 MaybeAlign Alignment;
2623 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2635 return SelectCall(&
I, IntrMemName);
2637 case Intrinsic::memset: {
2649 return SelectCall(&
I,
"memset");
2651 case Intrinsic::trap: {
2652 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2653 TII.get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP));
2659bool ARMFastISel::SelectTrunc(
const Instruction *
I) {
2668 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2670 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2674 if (!SrcReg)
return false;
2678 updateValueMap(
I, SrcReg);
2684 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2686 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
2691 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2695 { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2696 { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2697 { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2704 static const TargetRegisterClass *RCTbl[2][2] = {
2706 { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2707 { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2711 static const struct InstructionTable {
2716 }
IT[2][2][3][2] = {
2758 assert((SrcBits < DestBits) &&
"can only extend to larger types");
2759 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2760 "other sizes unimplemented");
2761 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2762 "other sizes unimplemented");
2764 bool hasV6Ops = Subtarget->hasV6Ops();
2765 unsigned Bitness = SrcBits / 8;
2766 assert((Bitness < 3) &&
"sanity-check table bounds");
2768 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2769 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2770 const InstructionTable *ITP = &
IT[isSingleInstr][isThumb2][Bitness][isZExt];
2771 unsigned Opc = ITP->Opc;
2772 assert(ARM::KILL !=
Opc &&
"Invalid table entry");
2773 unsigned hasS = ITP->hasS;
2776 "only MOVsi has shift operand addressing mode");
2777 unsigned Imm = ITP->Imm;
2780 bool setsCPSR = &ARM::tGPRRegClass == RC;
2781 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2796 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2797 for (
unsigned Instr = 0;
Instr != NumInstrsEmitted; ++
Instr) {
2798 ResultReg = createResultReg(RC);
2799 bool isLsl = (0 ==
Instr) && !isSingleInstr;
2800 unsigned Opcode = isLsl ? LSLOpc :
Opc;
2803 bool isKill = 1 ==
Instr;
2804 MachineInstrBuilder MIB =
BuildMI(
2805 *FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opcode), ResultReg);
2807 MIB.
addReg(ARM::CPSR, RegState::Define);
2821bool ARMFastISel::SelectIntExt(
const Instruction *
I) {
2824 Type *DestTy =
I->getType();
2825 Value *Src =
I->getOperand(0);
2826 Type *SrcTy = Src->getType();
2829 Register SrcReg = getRegForValue(Src);
2830 if (!SrcReg)
return false;
2832 EVT SrcEVT, DestEVT;
2835 if (!SrcEVT.
isSimple())
return false;
2836 if (!DestEVT.
isSimple())
return false;
2840 Register ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2843 updateValueMap(
I, ResultReg);
2847bool ARMFastISel::SelectShift(
const Instruction *
I,
2856 if (DestVT != MVT::i32)
2859 unsigned Opc = ARM::MOVsr;
2861 Value *Src2Value =
I->getOperand(1);
2863 ShiftImm = CI->getZExtValue();
2867 if (ShiftImm == 0 || ShiftImm >=32)
2873 Value *Src1Value =
I->getOperand(0);
2874 Register Reg1 = getRegForValue(Src1Value);
2879 if (
Opc == ARM::MOVsr) {
2880 Reg2 = getRegForValue(Src2Value);
2885 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2889 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2893 if (
Opc == ARM::MOVsi)
2895 else if (
Opc == ARM::MOVsr) {
2900 AddOptionalDefs(MIB);
2901 updateValueMap(
I, ResultReg);
2906bool ARMFastISel::fastSelectInstruction(
const Instruction *
I) {
2907 switch (
I->getOpcode()) {
2908 case Instruction::Load:
2909 return SelectLoad(
I);
2910 case Instruction::Store:
2911 return SelectStore(
I);
2912 case Instruction::CondBr:
2913 return SelectBranch(
I);
2914 case Instruction::IndirectBr:
2915 return SelectIndirectBr(
I);
2916 case Instruction::ICmp:
2917 case Instruction::FCmp:
2918 return SelectCmp(
I);
2919 case Instruction::FPExt:
2920 return SelectFPExt(
I);
2921 case Instruction::FPTrunc:
2922 return SelectFPTrunc(
I);
2923 case Instruction::SIToFP:
2924 return SelectIToFP(
I,
true);
2925 case Instruction::UIToFP:
2926 return SelectIToFP(
I,
false);
2927 case Instruction::FPToSI:
2928 return SelectFPToI(
I,
true);
2929 case Instruction::FPToUI:
2930 return SelectFPToI(
I,
false);
2931 case Instruction::Add:
2933 case Instruction::Or:
2934 return SelectBinaryIntOp(
I,
ISD::OR);
2935 case Instruction::Sub:
2937 case Instruction::FAdd:
2939 case Instruction::FSub:
2941 case Instruction::FMul:
2943 case Instruction::SDiv:
2944 return SelectDiv(
I,
true);
2945 case Instruction::UDiv:
2946 return SelectDiv(
I,
false);
2947 case Instruction::SRem:
2948 return SelectRem(
I,
true);
2949 case Instruction::URem:
2950 return SelectRem(
I,
false);
2951 case Instruction::Call:
2953 return SelectIntrinsicCall(*
II);
2954 return SelectCall(
I);
2955 case Instruction::Select:
2956 return SelectSelect(
I);
2957 case Instruction::Ret:
2958 return SelectRet(
I);
2959 case Instruction::Trunc:
2960 return SelectTrunc(
I);
2961 case Instruction::ZExt:
2962 case Instruction::SExt:
2963 return SelectIntExt(
I);
2964 case Instruction::Shl:
2966 case Instruction::LShr:
2968 case Instruction::AShr:
2985 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
2986 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
2987 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
2988 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
2989 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
2996bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *
MI,
unsigned OpNo,
2997 const LoadInst *LI) {
3000 if (!isLoadTypeLegal(LI->
getType(), VT))
3007 if (
MI->getNumOperands() < 3 || !
MI->getOperand(2).isImm())
3009 const uint64_t
Imm =
MI->getOperand(2).getImm();
3014 if (FLE.Opc[isThumb2] ==
MI->getOpcode() &&
3015 (uint64_t)FLE.ExpectedImm ==
Imm &&
3018 isZExt = FLE.isZExt;
3021 if (!Found)
return false;
3025 if (!ARMComputeAddress(LI->
getOperand(0), Addr))
return false;
3027 Register ResultReg =
MI->getOperand(0).getReg();
3028 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->
getAlign(), isZExt,
false))
3031 removeDeadCode(
I, std::next(
I));
3035Register ARMFastISel::ARMLowerPICELF(
const GlobalValue *GV, MVT VT) {
3037 LLVMContext *
Context = &MF->getFunction().getContext();
3039 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3046 MF->getDataLayout().getPrefTypeAlign(PointerType::get(*
Context, 0));
3047 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
3048 MachineMemOperand *CPMMO =
3052 Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
3053 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
3054 MachineInstrBuilder MIB =
3055 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), TempReg)
3058 if (
Opc == ARM::LDRcp)
3064 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
3067 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), DestReg)
3069 .
addImm(ARMPCLabelIndex);
3071 if (!Subtarget->isThumb())
3074 if (UseGOT_PREL && Subtarget->isThumb()) {
3076 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3077 TII.get(ARM::t2LDRi12), NewDestReg)
3080 DestReg = NewDestReg;
3081 AddOptionalDefs(MIB);
3086bool ARMFastISel::fastLowerArguments() {
3087 if (!FuncInfo.CanLowerReturn)
3094 CallingConv::ID CC =
F->getCallingConv();
3098 case CallingConv::Fast:
3099 case CallingConv::C:
3100 case CallingConv::ARM_AAPCS_VFP:
3101 case CallingConv::ARM_AAPCS:
3102 case CallingConv::ARM_APCS:
3103 case CallingConv::Swift:
3104 case CallingConv::SwiftTail:
3110 for (
const Argument &Arg :
F->args()) {
3111 if (Arg.getArgNo() >= 4)
3114 if (Arg.hasAttribute(Attribute::InReg) ||
3115 Arg.hasAttribute(Attribute::StructRet) ||
3116 Arg.hasAttribute(Attribute::SwiftSelf) ||
3117 Arg.hasAttribute(Attribute::SwiftError) ||
3118 Arg.hasAttribute(Attribute::ByVal))
3121 Type *ArgTy = Arg.getType();
3126 if (!ArgVT.
isSimple())
return false;
3138 ARM::R0, ARM::R1, ARM::R2, ARM::R3
3141 const TargetRegisterClass *RC = &ARM::rGPRRegClass;
3142 for (
const Argument &Arg :
F->args()) {
3143 unsigned ArgNo = Arg.getArgNo();
3145 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3149 Register ResultReg = createResultReg(RC);
3150 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3151 TII.get(TargetOpcode::COPY),
3153 updateValueMap(&Arg, ResultReg);
3165 return new ARMFastISel(funcInfo, libInfo, libcallLowering);
static const MCPhysReg GPRArgRegs[]
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred)
static const struct FoldableLoadExtendsStruct FoldableLoadExtends[]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static bool isSigned(unsigned Opcode)
This file defines the FastISel class.
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
static MaybeAlign getAlign(Value *Ptr)
Module.h This file contains the declarations for the Module class.
Machine Check Debug Module
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static const unsigned FramePtr
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
Register getFrameRegister(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
bool isThumb2Function() const
unsigned createPICLabelUId()
bool useFastISel() const
True if fast-isel is used.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
bool hasStandaloneRem(EVT VT) const override
Return true if the target can handle a standalone remainder operation.
PointerType * getType() const
Overload to return most specific pointer type.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
unsigned getValNo() const
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Value * getCalledOperand() const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
unsigned arg_size() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
const APFloat & getValueAPF() const
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
PointerType * getType() const
Global values are always pointers.
@ ExternalLinkage
Externally visible function.
Tracks which library functions to use for a particular subtarget.
Align getAlign() const
Return the alignment of the access that is being performed.
ArrayRef< MCOperandInfo > operands() const
bool isVector() const
Return true if this is a vector value type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MachineInstrBundleIterator< MachineInstr > iterator
void setFrameAddressIsTaken(bool T)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Value * getLength() const
Value * getRawDest() const
MaybeAlign getDestAlign() const
unsigned getDestAddressSpace() const
Value * getRawSource() const
Return the arguments to the instruction.
unsigned getSourceAddressSpace() const
MaybeAlign getSourceAlign() const
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
void reserve(size_type N)
void push_back(const T &Elt)
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Provides information about what library functions are available for the current target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isPositionIndependent() const
FloatABI::ABIType FloatABIType
FloatABIType - This setting is set by -float-abi=xxx option is specfied on the command line.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isStructTy() const
True if this is an instance of StructType.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
const Use * const_op_iterator
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ GOT_PREL
Thread Local Storage (General Dynamic Mode)
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
@ C
The default llvm calling convention, compatible with C.
@ ADD
Simple integer binary arithmetic operators.
@ FADD
Simple binary floating point operators.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
@ User
could "use" a pointer
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
FunctionAddr VTableAddr Value
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
unsigned getBLXOpcode(const MachineFunction &MF)
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.