const SDNode *N = Op.getNode();
EVT VT = Op.getValueType();
// ...
assert(isPowerOf2_32(VT.getSizeInBits()) && "Expected power-of-2 shift amount");
// ...
if (!isa<ConstantSDNode>(N->getOperand(1))) {
  // ...
}
uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
if (ShiftAmount == 16) {
  // ...
  switch (Op.getOpcode()) { /* ... */ }
}
// ...
switch (Op.getOpcode()) { /* ... */ }
SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
// ...
// Expand shifts by a non-constant amount into loops of single-bit shifts.
if (!isa<ConstantSDNode>(N->getOperand(1))) {
  switch (Op.getOpcode()) {
  // ...
    SDValue Amt = N->getOperand(1);
    EVT AmtVT = Amt.getValueType();
  // ...
    SDValue Amt = N->getOperand(1);
    EVT AmtVT = Amt.getValueType();
  // ...
  }
}
uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
SDValue Victim = N->getOperand(0);
// ...
switch (Op.getOpcode()) { /* ... */ }
// ...
if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
  // ...
} else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount && ShiftAmount < 7) {
  // ...
} else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
  // ...
} else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
  // ...
} else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
  // ...
} else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
  // ...
}
// ...
switch (ShiftAmount) { /* ... */ }
// ...
if (4 <= ShiftAmount && ShiftAmount < 8)
  switch (Op.getOpcode()) { /* ... */ }
else if (8 <= ShiftAmount && ShiftAmount < 12)
  switch (Op.getOpcode()) { /* ... */ }
else if (12 <= ShiftAmount)
  switch (Op.getOpcode()) { /* ... */ }
// ...
// Whatever is left of a constant shift becomes a chain of single-bit shifts.
while (ShiftAmount--) {
  Victim = DAG.getNode(Opc8, dl, VT, Victim);
}
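// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// AVR has only single-bit shift/rotate instructions, so a shift by a
// non-constant amount is lowered to a decrement-and-branch loop around a
// one-bit shift (the *LOOP pseudo nodes above), while a constant shift ends
// as the chain of single-bit nodes built by the while loop. A portable model:
#include <cstdint>

static uint8_t shl8_by_loop(uint8_t Value, uint8_t Amount) {
  while (Amount--)      // decrement-and-branch in the emitted loop
    Value = Value << 1; // one LSL per iteration
  return Value;
}

int main() { return shl8_by_loop(0x11, 3) == 0x88 ? 0 : 1; }
// ---------------------------------------------------------------------------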
SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  // ...
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  // ...
  switch (VT.getSimpleVT().SimpleTy) {
  // ...
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
  // ...
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
  // ...
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
  // ...
  }

  SDValue InChain = DAG.getEntryNode();
  // ...
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    // ...
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }
  // ...
  TargetLowering::CallLoweringInfo CLI(DAG);
  // ...
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);
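// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// ISD::SDIVREM/UDIVREM are lowered to one runtime-library call that returns
// both the quotient and the remainder; the RTLIB::*DIVREM_I* entries above
// pick the routine per type and signedness. The struct return below is only
// a stand-in for that "one call, two results" shape; the real helper names
// and register ABI live in the AVR runtime library and are not shown here.
#include <cstdint>

struct DivRem16 {
  int16_t Quot;
  int16_t Rem;
};

static DivRem16 sdivrem16(int16_t N, int16_t D) {
  return {static_cast<int16_t>(N / D), static_cast<int16_t>(N % D)};
}

int main() {
  DivRem16 R = sdivrem16(-7, 3); // both results come back from one "call"
  return (R.Quot == -2 && R.Rem == -1) ? 0 : 1;
}
// ---------------------------------------------------------------------------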
SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  // ...
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, SDLoc DL) const {
  assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
         "LHS and RHS have different types");
  assert((LHS.getSimpleValueType() == MVT::i8 ||
          LHS.getSimpleValueType() == MVT::i16) &&
         "invalid comparison type");
  // ...
  if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
    uint64_t Imm = cast<ConstantSDNode>(RHS)->getZExtValue();
    // Split LHS into its low and high bytes with ISD::EXTRACT_ELEMENT,
    // using DAG.getIntPtrConstant(0, DL) and DAG.getIntPtrConstant(1, DL)
    // as the element indices.
    // ...
    SDValue RHSlo = (Imm & 0xff) == 0
        /* ... otherwise extract byte 0 of RHS (DAG.getIntPtrConstant(0, DL)) */;
    SDValue RHShi = (Imm & 0xff00) == 0
        /* ... otherwise extract byte 1 of RHS (DAG.getIntPtrConstant(1, DL)) */;
    // ...
  } else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
    // Mirror image of the case above: LHS is the 16-bit constant.
    SDValue LHSlo = (Imm & 0xff) == 0
        /* ... otherwise extract byte 0 of LHS (DAG.getIntPtrConstant(0, DL)) */;
    SDValue LHShi = (Imm & 0xff00) == 0
        /* ... otherwise extract byte 1 of LHS (DAG.getIntPtrConstant(1, DL)) */;
    // Extract the two bytes of RHS the same way (indices
    // DAG.getIntPtrConstant(0, DL) and DAG.getIntPtrConstant(1, DL)).
    // ...
  }
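// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// Why a 16-bit compare can be split into CP on the low bytes followed by CPC
// on the high bytes: after the pair, "carry" matches LHS < RHS (unsigned) and
// "zero" matches LHS == RHS, exactly as one 16-bit compare would set them.
#include <cassert>
#include <cstdint>
#include <initializer_list>

struct Flags { bool Carry; bool Zero; };

static Flags cp_cpc(uint16_t LHS, uint16_t RHS) {
  uint8_t LHSlo = LHS & 0xff, LHShi = LHS >> 8;
  uint8_t RHSlo = RHS & 0xff, RHShi = RHS >> 8;
  bool Carry = LHSlo < RHSlo;                      // CP  LHSlo, RHSlo
  bool ZeroLo = LHSlo == RHSlo;
  int Diff = int(LHShi) - int(RHShi) - int(Carry); // CPC LHShi, RHShi
  return {Diff < 0, ZeroLo && (Diff & 0xff) == 0}; // CPC only keeps Z if both are zero
}

int main() {
  for (uint32_t A : {0u, 1u, 255u, 256u, 0x1234u, 0xffffu})
    for (uint32_t B : {0u, 1u, 255u, 256u, 0x1234u, 0xffffu}) {
      Flags F = cp_cpc(uint16_t(A), uint16_t(B));
      assert(F.Carry == (A < B) && F.Zero == (A == B));
    }
  return 0;
}
// ---------------------------------------------------------------------------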
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  // ...
  EVT VT = LHS.getValueType();
  bool UseTest = false;
  // ...
  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
    switch (C->getSExtValue()) {
    // ...
      LHS = DAG.getConstant(0, DL, VT);
    // ...
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
    // ...
    }
  }
  // ...
  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
    switch (C->getSExtValue()) {
    // ...
      LHS = DAG.getConstant(0, DL, VT);
    // ...
    }
  }
  // ...
  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
    RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
  }
  // ...
  // The 16-bit case splits both operands into bytes with ISD::EXTRACT_ELEMENT
  // (indices DAG.getIntPtrConstant(0, DL) / DAG.getIntPtrConstant(1, DL)) and
  // chains the byte compares:
  Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
  // ...
  // The 32-bit case extracts four bytes the same way before comparing:
  Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
  // ...
  // The TST path only examines the high byte:
  //   ... LHS, DAG.getIntPtrConstant(1, DL)));
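// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// The "C->getSExtValue() + 1" rewrites above turn a <=-style comparison
// against a constant into a <-style one, which maps onto the condition codes
// AVR provides directly. Assuming the constant is not already the maximum
// representable value (otherwise adding one would wrap), the identity is:
#include <cassert>
#include <cstdint>

static bool le_via_lt(int16_t X, int16_t C) {
  return X < static_cast<int16_t>(C + 1); // x <= C  <=>  x < C + 1
}

int main() {
  assert(le_via_lt(5, 5) && le_via_lt(4, 5) && !le_via_lt(6, 5));
  return 0;
}
// ---------------------------------------------------------------------------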
SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  // ...
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  // ...
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  // ...
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
  // ...
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  // ...
  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  // ...
  SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
  // ...
  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  // ...
  if (Op.getOperand(Op.getNumOperands() - 1) == ZeroReg ||
      Op.getOperand(Op.getNumOperands() - 2) == ZeroReg) {
    // ...
  }
  // ...
  SmallVector<SDValue, 8> Ops;
  SDNode *N = Op.getNode();
  // ...
  for (unsigned I = 0; I < N->getNumOperands(); I++) {
    SDValue Operand = N->getOperand(I);
    if (Operand.getValueType() == MVT::Glue) {
      // ...
    }
    // ...
    Ops.push_back(Operand);
  }
  // ...
  Ops.push_back(ZeroReg);
  // ...
  SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
  DAG.ReplaceAllUsesOfValueWith(Op, New);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
switch (Op.getOpcode()) {
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR:          return LowerShifts(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
case ISD::SETCC:         return LowerSETCC(Op, DAG);
case ISD::VASTART:       return LowerVASTART(Op, DAG);
case ISD::SDIVREM:
case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
case ISD::INLINEASM:     return LowerINLINEASM(Op, DAG);
}
// ReplaceNodeResults:
  switch (N->getOpcode()) {
  // ...
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      // ...
    }
  // ...
  }

// getPreIndexedAddressParts:
  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    // ...
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    // ...
  }
  // ...
  int RHSC = RHS->getSExtValue();
  // ...
  if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
    // ...
  }
  Base = Op->getOperand(0);
  // ...

// getPostIndexedAddressParts:
  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    // ...
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    // ...
  }
  // ...
  int RHSC = RHS->getSExtValue();
  // ...
  Base = Op->getOperand(0);
#include "AVRGenCallingConv.inc"

// Registers used by the calling convention, ordered in reverse as required
// by the ABI.
static const MCPhysReg RegList8AVR[] = {
    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9,  AVR::R8};
static const MCPhysReg RegList8Tiny[] = {AVR::R25, AVR::R24, AVR::R23,
                                         AVR::R22, AVR::R21, AVR::R20};
static const MCPhysReg RegList16AVR[] = {
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
    AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
    AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
    AVR::R11R10, AVR::R10R9,  AVR::R9R8};
static const MCPhysReg RegList16Tiny[] = {AVR::R26R25, AVR::R25R24,
                                          AVR::R24R23, AVR::R23R22,
                                          AVR::R22R21, AVR::R21R20};

static_assert(std::size(RegList8AVR) == std::size(RegList16AVR),
              "8-bit and 16-bit register arrays must be of equal length");
static_assert(std::size(RegList8Tiny) == std::size(RegList16Tiny),
              "8-bit and 16-bit register arrays must be of equal length");
/// Analyze incoming and outgoing function arguments.
template <typename ArgT>
static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
                             const Function *F, const DataLayout *TD,
                             const SmallVectorImpl<ArgT> &Args,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             CCState &CCInfo, bool Tiny) {
  // ...
  unsigned NumArgs = Args.size();
  // ...
  int RegLastIdx = -1;
  // ...
  bool UseStack = false;
  for (unsigned i = 0; i != NumArgs;) {
    MVT VT = Args[i].VT;
    // ...
    unsigned ArgIndex = Args[i].OrigArgIndex;
    // ...
    for (; j != NumArgs; ++j) {
      if (Args[j].OrigArgIndex != ArgIndex)
        break;
      TotalBytes += Args[j].VT.getStoreSize();
    }
    // Round up to an even number of bytes: registers are allocated in pairs.
    TotalBytes = alignTo(TotalBytes, 2);
    // ...
    if (TotalBytes == 0)
      continue;
    // ...
    unsigned RegIdx = RegLastIdx + TotalBytes;
    RegLastIdx = RegIdx;
    // ...
    if (RegIdx >= RegList8.size()) {
      // ...
    }
    // ...
    for (; i != j; ++i) {
      MVT VT = Args[i].VT;
      // ...
      assert((VT == MVT::i8 || VT == MVT::i16) &&
             "calling convention can only manage i8 and i16 types");
      // ...
      assert(Reg && "register not available in calling convention");
      // ...
    }
  }
}
/// Count the total number of bytes needed to pass or return these arguments.
template <typename ArgT>
static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
  unsigned TotalBytes = 0;
  for (const ArgT &Arg : Args) {
    TotalBytes += Arg.VT.getStoreSize();
  }
  return TotalBytes;
}

/// Analyze incoming and outgoing value of returning from a function.
template <typename ArgT>
static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
                                CCState &CCInfo, bool Tiny) {
  unsigned NumArgs = Args.size();
  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
  // ...
  if (Tiny) {
    assert(TotalBytes <= 4 &&
           "return values greater than 4 bytes cannot be lowered on AVRTiny");
  } else {
    assert(TotalBytes <= 8 &&
           "return values greater than 8 bytes cannot be lowered on AVR");
  }
  // ...
  if (TotalBytes > 4) {
    // ...
  }
  TotalBytes = alignTo(TotalBytes, 2);
  // ...
  int RegIdx = TotalBytes - 1;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT VT = Args[i].VT;
    // ...
    assert(Reg && "register not available in calling convention");
    // ...
  }
}
SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();
  // ...
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  // ...
  CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
  // ...
  for (CCValAssign &VA : ArgLocs) {
    // ...
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      }
      // ...
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      // ...
      switch (VA.getLocInfo()) {
      // ...
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      // ... AssertSext with DAG.getValueType(VA.getValVT()), then:
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      // ... AssertZext with DAG.getValueType(VA.getValVT()), then:
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      // ...
      }
      InVals.push_back(ArgValue);
    } else {
      // ...
      EVT LocVT = VA.getLocVT();
      // ...
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);
      // ...
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
                                   /* ... */));
    }
  }
  // ...
  unsigned StackSize = CCInfo.getNextStackOffset();
  AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  // ...
  AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  // ...
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  // ...
  bool isVarArg = CLI.IsVarArg;
  // ...
  MachineFunction &MF = DAG.getMachineFunction();
  // ...
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  // ...
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    if (isa<Function>(GV))
      F = cast<Function>(GV);
    Callee =
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                         getPointerTy(DAG.getDataLayout()));
  }
  // ...
  CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
  // ...
  unsigned NumBytes = CCInfo.getNextStackOffset();
  // ...
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
  // ...
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  // ...
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];
    // ...
    switch (VA.getLocInfo()) { /* ... */ }
    // ...
    if (VA.isMemLoc()) {
      HasStackArgs = true;
      // ...
    }
    // ...
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }
  // ...
  SmallVector<SDValue, 8> MemOpChains;
  for (; AI != AE; AI++) {
    CCValAssign &VA = ArgLocs[AI];
    SDValue Arg = OutVals[AI];
    // ...
    SDValue PtrOff = DAG.getNode(
        /* ... */
        DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
        DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
    // ...
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff,
                     /* ... */));
  }
  // ...
  if (!MemOpChains.empty()) {
    // ...
  }
  // ...
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }
  // ...
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  // ...
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  }
  // ...
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));
  // ...
  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }
  // ...
  InFlag = Chain.getValue(1);
  // ...
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InFlag, DL);
  // ...
  InFlag = Chain.getValue(1);
  // ...
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
                         InVals);
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // ...
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  // ...
  CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
  // ...
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
                               InFlag)
                .getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }
bool AVRTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  // ...
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
SDValue
AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // ...
  SmallVector<CCValAssign, 16> RVLocs;
  // ...
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  // ...
  MachineFunction &MF = DAG.getMachineFunction();
  // ...
  CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
  // ...
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // ...
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // ...
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
    // ...
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
  // ...
  // Naked functions do not emit a return instruction at all.
  if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
    // ...
  }
  // ...
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  // Plain functions return with RET; interrupt and signal handlers use RETI.
  if (!AFI->isInterruptOrSignalHandler()) {
    // ...
  }
  // ...
  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }
  // ...
  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  // ...
  const TargetRegisterClass *RC;
  bool HasRepeatedOperand = false;
  bool HasZeroOperand = false;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  // ...
  DebugLoc dl = MI.getDebugLoc();
  // ...
  switch (MI.getOpcode()) {
  // ...
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true;
  // ...
    RC = &AVR::DREGSRegClass;
  // ...
    RC = &AVR::GPR8RegClass;
  // ...
    RC = &AVR::DREGSRegClass;
  // ...
    RC = &AVR::GPR8RegClass;
  // ...
    RC = &AVR::DREGSRegClass;
  // ...
    RC = &AVR::GPR8RegClass;
    HasZeroOperand = true;
  // ...
    RC = &AVR::DREGSRegClass;
  // ...
    RC = &AVR::GPR8RegClass;
  // ...
    RC = &AVR::DREGSRegClass;
  // ...
  }
  // ...
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  // ...
  for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
    // ...
  // ...
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
  // ...
  F->insert(I, LoopBB);
  F->insert(I, CheckBB);
  F->insert(I, RemBB);
  // ...
  RemBB->transferSuccessorsAndUpdatePHIs(BB);
  // ...
  BB->addSuccessor(CheckBB);
  LoopBB->addSuccessor(CheckBB);
  CheckBB->addSuccessor(LoopBB);
  CheckBB->addSuccessor(RemBB);
  // ...
  Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftReg = RI.createVirtualRegister(RC);
  Register ShiftReg2 = RI.createVirtualRegister(RC);
  Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
  // ...
  auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  if (HasRepeatedOperand)
    ShiftMI.addReg(ShiftReg);
  // ...
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg) // ...
  // ...
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg) // ...
  // ...
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg) // ...
  // ...
  BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
  // ...
  MI.eraseFromParent();
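// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// Control-flow shape of the blocks built above: CheckBB tests the remaining
// count, LoopBB performs one single-bit shift and decrements, RemBB carries
// on. Flattened into portable C++ so the zero-iteration case stays visible:
#include <cassert>
#include <cstdint>

static uint16_t shift_left_loop(uint16_t Value, uint8_t Count) {
check:
  if (Count == 0)
    goto rem;   // CheckBB -> RemBB
  Value <<= 1;  // LoopBB: one LSL/ROL step
  Count -= 1;   // DECRd
  goto check;   // LoopBB -> CheckBB
rem:
  return Value; // RemBB
}

int main() {
  assert(shift_left_loop(0x0001, 0) == 0x0001);
  assert(shift_left_loop(0x0101, 4) == 0x1010);
  return 0;
}
// ---------------------------------------------------------------------------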
static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB,
                                 MutableArrayRef<std::pair<Register, int>> Regs,
                                 ISD::NodeType Opc, int64_t ShiftAmt) {
  // ...
  const bool ShiftLeft = Opc == ISD::SHL;
  const bool ArithmeticShift = Opc == ISD::SRA;
  // ...
  Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
  // ...
  // Shift left when the remaining in-byte amount is 6 or 7.
  if (ShiftLeft && (ShiftAmt % 8) >= 6) {
    // ...
    size_t ShiftRegsOffset = ShiftAmt / 8;
    size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
    MutableArrayRef<std::pair<Register, int>> ShiftRegs =
        Regs.slice(ShiftRegsOffset, ShiftRegsSize);
    // ...
    Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    // ...
    if (ShiftAmt % 8 == 6) {
      // ...
      Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
      LowByte = NewLowByte;
    }
    // ...
    for (size_t I = 0; I < Regs.size(); I++) {
      int ShiftRegsIdx = I + 1;
      if (ShiftRegsIdx < (int)ShiftRegs.size()) {
        Regs[I] = ShiftRegs[ShiftRegsIdx];
      } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
        Regs[I] = std::pair(LowByte, 0);
      } else {
        Regs[I] = std::pair(ZeroReg, 0);
      }
    }
    // ...
  }

  // Shift right when the remaining in-byte amount is 6 or 7.
  if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
    // ...
    size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
    MutableArrayRef<std::pair<Register, int>> ShiftRegs =
        Regs.slice(0, ShiftRegsSize);
    // ...
    Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    // ...
    if (ArithmeticShift) {
      // ...
    }
    // ...
    if (ShiftAmt % 8 == 6) {
      // ...
      Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
    }
    // ...
    for (int I = Regs.size() - 1; I >= 0; I--) {
      int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
      if (ShiftRegsIdx >= 0) {
        Regs[I] = ShiftRegs[ShiftRegsIdx];
      } else if (ShiftRegsIdx == -1) {
        Regs[I] = std::pair(HighByte, 0);
      } else {
        Regs[I] = std::pair(ExtByte, 0);
      }
    }
    // ...
  }

  // Shift left in whole bytes: move each byte up one position.
  while (ShiftLeft && ShiftAmt >= 8) {
    // ...
    for (size_t I = 0; I < Regs.size() - 1; I++) {
      Regs[I] = Regs[I + 1];
    }
    // ...
    Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
    // ...
    Regs = Regs.drop_back(1);
    // ...
  }

  // Shift right in whole bytes.
  if (!ShiftLeft && ShiftAmt >= 8) {
    if (ArithmeticShift) {
      // Sign-extend: produce 0x00 or 0xff from the sign bit of the top byte.
      // ...
      ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
          .addReg(Regs[0].first, 0, Regs[0].second)
          .addReg(Regs[0].first, 0, Regs[0].second);
      BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg) // ...
      // ...
    } else {
      ShrExtendReg = ZeroReg;
    }
    for (; ShiftAmt >= 8; ShiftAmt -= 8) {
      // ...
      for (size_t I = Regs.size() - 1; I != 0; I--) {
        Regs[I] = Regs[I - 1];
      }
      // ...
      Regs[0] = std::pair(ShrExtendReg, 0);
      // ...
      Regs = Regs.drop_front(1);
    }
  }
  // ...
  assert((ShiftAmt < 8) && "Unexpect shift amount");
  // Shift by four using SWAP: swap the nibbles of every byte, mask with
  // 0xf0/0x0f, and combine with the nibble borrowed from the neighbouring byte.
  if (!ArithmeticShift && ShiftAmt >= 4) {
    // ...
    for (size_t I = 0; I < Regs.size(); I++) {
      size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
      Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
      // ...
      Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
      Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
      // ...
          .addImm(ShiftLeft ? 0xf0 : 0x0f);
      // ...
      Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
      size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
      Regs[PrevIdx] = std::pair(R, 0);
      // ...
      Regs[Idx] = std::pair(AndReg, 0);
    }
    // ...
  }

  // Shift the remaining bits one position at a time.
  while (ShiftLeft && ShiftAmt) {
    // ...
    for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
      Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
      if (I == (ssize_t)Regs.size() - 1) {
        // ...
            .addReg(In, 0, InSubreg);
      } else {
        // ...
            .addReg(In, 0, InSubreg);
      }
      Regs[I] = std::pair(Out, 0);
    }
    // ...
  }
  while (!ShiftLeft && ShiftAmt) {
    // ...
    for (size_t I = 0; I < Regs.size(); I++) {
      Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      // ...
      unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
      // ...
      Regs[I] = std::pair(Out, 0);
    }
    // ...
  }
  // ...
  if (ShiftAmt != 0) {
    // ...
  }
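// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// The SWAP + ANDI trick used above, shown standalone for a 16-bit left shift
// by four: each byte swaps its nibbles, keeps one nibble via the 0xf0/0x0f
// mask, and borrows the other nibble from its neighbour.
#include <cassert>
#include <cstdint>
#include <initializer_list>

static uint8_t swap_nibbles(uint8_t B) { return uint8_t((B << 4) | (B >> 4)); }

static uint16_t shl4_by_swap(uint16_t V) {
  uint8_t Lo = V & 0xff, Hi = V >> 8;
  uint8_t SwLo = swap_nibbles(Lo), SwHi = swap_nibbles(Hi);
  uint8_t NewHi = (SwHi & 0xf0) | (SwLo & 0x0f); // high byte takes Lo's top nibble
  uint8_t NewLo = SwLo & 0xf0;                   // low byte keeps only its own nibble
  return uint16_t((NewHi << 8) | NewLo);
}

int main() {
  for (uint32_t V : {0x0000u, 0x1234u, 0xabcdu, 0xffffu})
    assert(shl4_by_swap(uint16_t(V)) == uint16_t(V << 4));
  return 0;
}
// ---------------------------------------------------------------------------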
MachineBasicBlock *
AVRTargetLowering::insertWideShift(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {
  // ...
  const DebugLoc &dl = MI.getDebugLoc();
  // ...
  int64_t ShiftAmt = MI.getOperand(4).getImm();
  // ...
  switch (MI.getOpcode()) { /* ... */ }
  // ...
  // The 32-bit input, split into four 8-bit pieces, most significant first.
  std::array<std::pair<Register, int>, 4> Registers = {
      std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
      std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
      std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
      std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
  };
  // ...
  if (/* ... */
      (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
    // ...
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
    // ...
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
    // ...
  } else {
    // ...
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
    // ...
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
    // ...
  }
  // ...
  MI.eraseFromParent();
static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
  if (I->getOpcode() == AVR::COPY) {
    Register SrcReg = I->getOperand(1).getReg();
    return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
  }
  return false;
}
// ...

MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  // ...
  // Restore R1 to zero after the multiplication (MUL writes its result to
  // R1:R0, clobbering the zero register).
  BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1) // ...
  // ...
}

MachineBasicBlock *
AVRTargetLowering::insertCopyZero(MachineInstr &MI,
                                  MachineBasicBlock *BB) const {
  // ...
      .add(MI.getOperand(0))
  // ...
  MI.eraseFromParent();
MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  // ...
  DebugLoc dl = MI.getDebugLoc();
  // ...
  const TargetRegisterClass *RC =
      (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
  unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
  unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
  // ...
  // Load the current value, apply the operation, and store the result back.
  BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
      .add(MI.getOperand(1));
  // ...
      .add(MI.getOperand(2));
  // ...
      .add(MI.getOperand(1))
  // ...
  MI.eraseFromParent();
  int Opc = MI.getOpcode();
  // ...
  switch (Opc) {
  // Shift and rotate pseudo instructions.
  // ...
    return insertShift(MI, MBB);
  // ...
    return insertWideShift(MI, MBB);
  // ...
    return insertMul(MI, MBB);
  // ...
    return insertCopyZero(MI, MBB);
  case AVR::AtomicLoadAdd8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
  case AVR::AtomicLoadAdd16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
  case AVR::AtomicLoadSub8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
  case AVR::AtomicLoadSub16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
  case AVR::AtomicLoadAnd8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
  case AVR::AtomicLoadAnd16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
  case AVR::AtomicLoadOr8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
  case AVR::AtomicLoadOr16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
  case AVR::AtomicLoadXor8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
  case AVR::AtomicLoadXor16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
  }
  // ...
  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");
  // ...
  if (FallThrough != nullptr) {
    // ...
  }
  // ...
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);
  // ...
  // Build the PHI that merges the two values into MI.getOperand(0).getReg().
  // ...
  MI.eraseFromParent();
// getConstraintType:
  if (Constraint.size() == 1) {
    // ...
    switch (Constraint[0]) { /* ... */ }
  }
// ...

// getInlineAsmMemConstraint:
  switch (ConstraintCode[0]) { /* ... */ }
// ...

// getSingleConstraintMatchWeight:
  Value *CallOperandVal = info.CallOperandVal;
  // ...
  if (!CallOperandVal) {
    // ...
  }
  // ...
  switch (*constraint) {
  // ...
    if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
      // ...
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<6>(C->getZExtValue())) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 2) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 0) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<8>(C->getZExtValue())) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getSExtValue() == -1) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
          (C->getZExtValue() == 24)) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 1) {
        // ...
      }
    }
  // ...
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
        // ...
      }
    }
  // ...
  }
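// --- Illustrative sketch (not part of AVRISelLowering.cpp) -----------------
// The immediate checks above line up with the documented AVR inline-asm
// constraint letters. A compact standalone validator over those ranges (the
// letter-to-range mapping mirrors the checks visible in this file):
#include <cassert>
#include <cstdint>

static bool matchesAVRImmConstraint(char Letter, int64_t V) {
  switch (Letter) {
  case 'I': return V >= 0 && V <= 63;          // isUInt<6>
  case 'J': return V >= -63 && V <= 0;
  case 'K': return V == 2;
  case 'L': return V == 0;
  case 'M': return V >= 0 && V <= 255;         // isUInt<8>
  case 'N': return V == -1;
  case 'O': return V == 8 || V == 16 || V == 24;
  case 'P': return V == 1;
  case 'R': return V >= -6 && V <= 5;
  default:  return false;
  }
}

int main() {
  assert(matchesAVRImmConstraint('I', 63) && !matchesAVRImmConstraint('I', 64));
  assert(matchesAVRImmConstraint('O', 16) && !matchesAVRImmConstraint('O', 12));
  assert(matchesAVRImmConstraint('R', -6) && !matchesAVRImmConstraint('R', 6));
  return 0;
}
// ---------------------------------------------------------------------------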
std::pair<unsigned, const TargetRegisterClass *>
AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ...
      return std::make_pair(0U, &AVR::LD8loRegClass);
    // ...
      return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
    // ...
      return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
    // ...
      return std::make_pair(0U, &AVR::LD8RegClass);
    // ...
      return std::make_pair(0U, &AVR::DLDREGSRegClass);
    // ...
      return std::make_pair(0U, &AVR::GPR8loRegClass);
    // ...
      return std::make_pair(0U, &AVR::DREGSloRegClass);
    // ...
      return std::make_pair(0U, &AVR::PTRREGSRegClass);
    // ...
      return std::make_pair(0U, &AVR::GPRSPRegClass);
    // ...
      return std::make_pair(0U, &AVR::GPR8RegClass);
    // ...
      return std::make_pair(0U, &AVR::DREGSRegClass);
    // ...
      //                        &AVR::GPR8RegClass);
    // ...
      return std::make_pair(0U, &AVR::IWREGSRegClass);
    // ...
      return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
    // ...
      return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
    // ...
      return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
    // ...
    }
  }
void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  // ...
  EVT Ty = Op.getValueType();
  // ...
  if (Constraint.length() != 1) {
    // ...
  }
  // ...
  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) { /* ... */ }
  // ...
  int64_t CVal64 = C->getSExtValue();
  // ...
  switch (ConstraintLetter) {
  case 'I': // 0..63
    if (!isUInt<6>(CUVal64))
      return;
    // ...
  case 'J': // -63..0
    if (CVal64 < -63 || CVal64 > 0)
      return;
    // ...
  case 'M': // 0..255
    if (!isUInt<8>(CUVal64))
      return;
    // ...
  case 'O': // 8, 16 or 24
    if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
      return;
    // ...
  case 'R': // -6..5
    if (CVal64 < -6 || CVal64 > 5)
      return;
    // ...
  case 'G': // floating-point zero
    if (!FC || !FC->isZero())
      return;
    // ...
  }
  // ...
  if (Result.getNode()) {
    Ops.push_back(Result);
  }
Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                              const MachineFunction &MF) const {
  // ...
  // 8-bit register names map to single registers, wider names to pairs.
  // ...
            .Case("r0", AVR::R0)
            .Case("r1", AVR::R1)
  // ...
            .Case("r0", AVR::R1R0)
            .Case("sp", AVR::SP)
  // ...