#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H

// This file contains helpers that try to clean up artifacts (G_TRUNC,
// G_[SZA]EXT, G_MERGE_VALUES, G_UNMERGE_VALUES, ...) created while
// legalizing, so that neighbouring instructions can be legalized cleanly.

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "legalizer"

namespace llvm {

class LegalizationArtifactCombiner {
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  const LegalizerInfo &LI;
  static bool isArtifactCast(unsigned Opc) {
    switch (Opc) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
      return true;
    default:
      return false;
    }
  }

public:
  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const LegalizerInfo &LI)
      : Builder(B), MRI(MRI), LI(LI) {}
  bool tryCombineAnyExt(MachineInstr &MI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs,
                        GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);

    // aext(trunc x) -> aext/copy/trunc x
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      if (MRI.getType(DstReg) == MRI.getType(TruncSrc))
        replaceRegOrBuildCopy(DstReg, TruncSrc, MRI, Builder, UpdatedDefs,
                              Observer);
      else
        Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // aext([asz]ext x) -> [asz]ext x
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)),
                                                    m_GZExt(m_Reg(ExtSrc)))))) {
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *ExtMI, DeadInsts);
      return true;
    }

    // Try to fold aext(g_constant) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }
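  // Illustrative MIR for the anyext fold above (a sketch; the virtual
  // register names and types are invented, not taken from an in-tree test):
  //   %t:_(s32) = G_TRUNC %x:_(s64)
  //   %a:_(s64) = G_ANYEXT %t
  // combines to a plain reuse of %x (or a COPY of it), since an anyext back
  // to the original type places no requirement on the recreated high bits.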
  bool tryCombineZExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs,
                      GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_ZEXT);

    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // zext(trunc x) -> and (aext/copy/trunc x), mask
    // zext(sext x)  -> and (sext x), mask
    Register TruncSrc;
    Register SextSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
        mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
          isConstantUnsupported(DstTy))
        return false;
      LLT SrcTy = MRI.getType(SrcReg);
      APInt MaskVal = APInt::getAllOnes(SrcTy.getScalarSizeInBits());
      auto Mask = Builder.buildConstant(
          DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
      if (SextSrc && (DstTy != MRI.getType(SextSrc)))
        SextSrc = Builder.buildSExtOrTrunc(DstTy, SextSrc).getReg(0);
      if (TruncSrc && (DstTy != MRI.getType(TruncSrc)))
        TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
      Builder.buildAnd(DstReg, SextSrc ? SextSrc : TruncSrc, Mask);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // zext(zext x) -> (zext x)
    Register ZextSrc;
    if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
      Observer.changingInstr(MI);
      MI.getOperand(1).setReg(ZextSrc);
      Observer.changedInstr(MI);
      UpdatedDefs.push_back(DstReg);
      markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold zext(g_constant) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }
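  // Illustrative MIR for the zext fold above (a sketch; register names and
  // types are invented):
  //   %t:_(s32) = G_TRUNC %x:_(s64)
  //   %z:_(s64) = G_ZEXT %t
  // becomes
  //   %m:_(s64) = G_CONSTANT i64 4294967295
  //   %z:_(s64) = G_AND %x, %m
  // i.e. the low 32 bits are kept and the high bits are cleared explicitly.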
  bool tryCombineSExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_SEXT);

    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // sext(trunc x) -> sext_inreg (aext/copy/trunc x), c
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
        return false;
      LLT SrcTy = MRI.getType(SrcReg);
      uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
      if (DstTy != MRI.getType(TruncSrc))
        TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
      Builder.buildSExtInReg(DstReg, TruncSrc, SizeInBits);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // sext(zext x) -> (zext x)
    // sext(sext x) -> (sext x)
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)))))) {
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold sext(g_constant) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }
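  // Illustrative MIR for the sext fold above (a sketch; register names and
  // types are invented):
  //   %t:_(s32) = G_TRUNC %x:_(s64)
  //   %s:_(s64) = G_SEXT %t
  // becomes, when G_SEXT_INREG is supported for s64,
  //   %s:_(s64) = G_SEXT_INREG %x, 32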
  bool tryCombineTrunc(MachineInstr &MI,
                       SmallVectorImpl<MachineInstr *> &DeadInsts,
                       SmallVectorImpl<Register> &UpdatedDefs,
                       GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_TRUNC);

    Register DstReg = MI.getOperand(0).getReg();
    const LLT DstTy = MRI.getType(DstReg);
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // Try to fold trunc(g_constant) when the smaller constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }

    // Try to fold trunc(merge) so the merge's sources are used directly.
    if (auto *SrcMerge = dyn_cast<GMerge>(SrcMI)) {
      const Register MergeSrcReg = SrcMerge->getSourceReg(0);
      const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
      const unsigned DstSize = DstTy.getSizeInBits();
      const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();

      if (DstSize < MergeSrcSize) {
        // The destination fits inside a single merge source: truncate it.
        if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
          return false;
        LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
                          << MI);
        Builder.buildTrunc(DstReg, MergeSrcReg);
        UpdatedDefs.push_back(DstReg);
      } else if (DstSize == MergeSrcSize) {
        // If the sizes match, simply reuse the first merge input.
        LLVM_DEBUG(
            dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
                   << MI);
        replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      } else if (DstSize % MergeSrcSize == 0) {
        // The truncated size is a multiple of the merge-source size, so a
        // smaller merge of the leading sources is enough.
        if (isInstUnsupported(
                {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
          return false;
        LLVM_DEBUG(
            dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
                   << MI);
        const unsigned NumSrcs = DstSize / MergeSrcSize;
        assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
               "trunc(merge) should require less inputs than merge");
        SmallVector<Register, 8> SrcRegs(NumSrcs);
        for (unsigned i = 0; i < NumSrcs; ++i)
          SrcRegs[i] = SrcMerge->getSourceReg(i);
        Builder.buildMergeValues(DstReg, SrcRegs);
        UpdatedDefs.push_back(DstReg);
      } else {
        // Unable to combine.
        return false;
      }

      markInstAndDefDead(MI, *SrcMerge, DeadInsts);
      return true;
    }

    // trunc(trunc x) -> trunc x
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      Builder.buildTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
      return true;
    }

    // trunc([asz]ext/trunc/... x) -> x, when the found value already has the
    // destination type.
    ArtifactValueFinder Finder(MRI, Builder, LI);
    if (Register FoundReg =
            Finder.findValueFromDef(DstReg, 0, DstTy.getSizeInBits())) {
      LLT FoundRegTy = MRI.getType(FoundReg);
      if (DstTy == FoundRegTy) {
        LLVM_DEBUG(dbgs() << ".. Combine G_TRUNC(G_[S,Z,ANY]EXT/G_TRUNC...): "
                          << MI);
        replaceRegOrBuildCopy(DstReg, FoundReg, MRI, Builder, UpdatedDefs,
                              Observer);
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
        return true;
      }
    }

    return false;
  }
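  // Illustrative MIR for the trunc(merge) folds above (a sketch; register
  // names and types are invented):
  //   %m:_(s64) = G_MERGE_VALUES %a:_(s16), %b:_(s16), %c:_(s16), %d:_(s16)
  //   %t:_(s32) = G_TRUNC %m
  // becomes %t:_(s32) = G_MERGE_VALUES %a, %b, since the destination size is
  // a multiple of the merge-source size; if the destination were no wider
  // than a single merge input, a G_TRUNC of that input (or the input itself)
  // would be used instead.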
  /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
  bool tryFoldImplicitDef(MachineInstr &MI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {
    unsigned Opcode = MI.getOpcode();
    assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
           Opcode == TargetOpcode::G_SEXT);

    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                           MI.getOperand(1).getReg(), MRI)) {
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstTy = MRI.getType(DstReg);

      if (Opcode == TargetOpcode::G_ANYEXT) {
        // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
        if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
          return false;
        Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
      } else {
        // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0, because the extended
        // bits must be defined (zero, or copies of an undefined sign bit).
        if (isConstantUnsupported(DstTy))
          return false;
        Builder.buildConstant(DstReg, 0);
      }
      UpdatedDefs.push_back(DstReg);

      markInstAndDefDead(MI, *DefMI, DeadInsts);
      return true;
    }
    return false;
  }
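  // Illustrative MIR for the implicit-def fold above (a sketch; register
  // names and types are invented):
  //   %u:_(s8)  = G_IMPLICIT_DEF
  //   %z:_(s32) = G_ZEXT %u
  // folds to %z:_(s32) = G_CONSTANT i32 0, while the G_ANYEXT form folds to a
  // wider G_IMPLICIT_DEF.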
  bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

    const unsigned CastOpc = CastMI.getOpcode();
    if (!isArtifactCast(CastOpc))
      return false;

    const unsigned NumDefs = MI.getNumOperands() - 1;
    Register CastSrcReg = CastMI.getOperand(1).getReg();
    const LLT CastSrcTy = MRI.getType(CastSrcReg);
    const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());

    const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
    const unsigned DestSize = DestTy.getSizeInBits();

    if (CastOpc == TargetOpcode::G_TRUNC) {
      if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
        // Unmerge(trunc <vector>): unmerge the trunc's wider source instead
        // and truncate each resulting piece.
        unsigned UnmergeNumElts =
            DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
        LLT UnmergeTy = CastSrcTy.changeElementCount(
            ElementCount::getFixed(UnmergeNumElts));

        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}))
          return false;

        Builder.setInstr(MI);
        auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);

        for (unsigned I = 0; I != NumDefs; ++I) {
          Register DefReg = MI.getOperand(I).getReg();
          UpdatedDefs.push_back(DefReg);
          Builder.buildTrunc(DefReg, NewUnmerge.getReg(I));
        }

        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }

      if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
        // Unmerge(trunc <scalar>): unmerge the wider source, adding fresh
        // registers for the truncated-away pieces.
        if (CastSrcSize % DestSize != 0)
          return false;

        // Check that the new unmerge is supported.
        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
          return false;

        const unsigned NewNumDefs = CastSrcSize / DestSize;
        SmallVector<Register, 8> DstRegs(NewNumDefs);
        for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
          if (Idx < NumDefs)
            DstRegs[Idx] = MI.getOperand(Idx).getReg();
          else
            DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
        }

        Builder.setInstr(MI);
        Builder.buildUnmerge(DstRegs, CastSrcReg);

        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }
    }

    // TODO: support combines with other casts as well.
    return false;
  }
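  // Illustrative MIR for the unmerge(trunc) fold above (a sketch; register
  // names and types are invented):
  //   %1:_(s16) = G_TRUNC %0:_(s32)
  //   %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %1
  // becomes
  //   %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0
  // where %4 and %5 are fresh registers covering the truncated-away bits.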
  static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp,
                                 LLT OpTy, LLT DestTy) {
    // Check if we found a definition that is like G_MERGE_VALUES, and decide
    // whether an unmerge can fold through it (possibly with an intermediate
    // ConvertOp cast).
    switch (MergeOp) {
    default:
      return false;
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_MERGE_VALUES:
      // ... (scalar/vector domain checks between OpTy and DestTy)
    case TargetOpcode::G_CONCAT_VECTORS: {
      // ...
      if (ConvertOp == TargetOpcode::G_TRUNC)
        // ... (element-size comparisons)
      // ...
    }
    }
  }

  /// Try to replace DstReg with SrcReg or build a COPY instruction
  /// depending on the register constraints.
  static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
                                    MachineRegisterInfo &MRI,
                                    MachineIRBuilder &Builder,
                                    SmallVectorImpl<Register> &UpdatedDefs,
                                    GISelChangeObserver &Observer) {
    if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
      Builder.buildCopy(DstReg, SrcReg);
      UpdatedDefs.push_back(DstReg);
      return;
    }
    SmallVector<MachineInstr *, 4> UseMIs;
    // Get the users and notify the observer before replacing.
    for (auto &UseMI : MRI.use_instructions(DstReg)) {
      UseMIs.push_back(&UseMI);
      Observer.changingInstr(UseMI);
    }
    // Replace the registers.
    MRI.replaceRegWith(DstReg, SrcReg);
    UpdatedDefs.push_back(SrcReg);
    // Notify the observer that we changed the instructions.
    for (auto *UseMI : UseMIs)
      Observer.changedInstr(*UseMI);
  }

  /// Return the operand index in \p MI that defines \p SearchDef.
  static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
    unsigned DefIdx = 0;
    for (const MachineOperand &Def : MI.defs()) {
      if (Def.getReg() == SearchDef)
        break;
      ++DefIdx;
    }
    return DefIdx;
  }
  /// This class provides utilities for finding source registers of specific
  /// bit ranges in an artifact. The routines look through other artifacts to
  /// try to find a non-artifact source of a value.
  class ArtifactValueFinder {
    MachineRegisterInfo &MRI;
    MachineIRBuilder &MIB;
    const LegalizerInfo &LI;

    // Stores the best register found in the current query so far.
    Register CurrentBest = Register();

    /// Given a concat_vectors op \p Concat and a start bit and size, try to
    /// find the origin of the value defined by that start position and size.
    /// \returns a register with the requested size, or the current best
    /// register found during the current query.
    Register findValueFromConcat(GConcatVectors &Concat, unsigned StartBit,
                                 unsigned Size) {
      assert(Size > 0);

      // Find the source operand that provides the bits requested.
      Register Src1Reg = Concat.getSourceReg(0);
      unsigned SrcSize = MRI.getType(Src1Reg).getSizeInBits();

      // Operand index of the source that provides the start of the bit range.
      unsigned StartSrcIdx = (StartBit / SrcSize) + 1;
      // Offset into that source at which the bit range starts.
      unsigned InRegOffset = StartBit % SrcSize;
      // Give up if the bits span multiple sources.
      if (InRegOffset + Size > SrcSize)
        return CurrentBest;

      Register SrcReg = Concat.getReg(StartSrcIdx);
      if (InRegOffset == 0 && Size == SrcSize) {
        CurrentBest = SrcReg;
        return findValueFromDefImpl(SrcReg, 0, Size);
      }

      return findValueFromDefImpl(SrcReg, InRegOffset, Size);
    }
    /// Given a build_vector op \p BV and a start bit and size, try to find
    /// the origin of the value defined by that start position and size.
    Register findValueFromBuildVector(GBuildVector &BV, unsigned StartBit,
                                      unsigned Size) {
      assert(Size > 0);

      // Find the source operand that provides the bits requested.
      Register Src1Reg = BV.getSourceReg(0);
      unsigned SrcSize = MRI.getType(Src1Reg).getSizeInBits();

      // Operand index of the source that provides the start of the bit range.
      unsigned StartSrcIdx = (StartBit / SrcSize) + 1;
      // Offset into that source at which the bit range starts.
      unsigned InRegOffset = StartBit % SrcSize;

      if (InRegOffset != 0)
        return CurrentBest; // The range doesn't start on an element boundary.
      // ... (further early-exit checks elided)

      if (Size > SrcSize) {
        // The range covers several sources: only handle an exact multiple,
        // and rebuild a smaller build_vector from the sources actually used.
        if (Size % SrcSize > 0)
          return CurrentBest;
        unsigned NumSrcsUsed = Size / SrcSize;
        // ...
        LLT SrcTy = MRI.getType(Src1Reg);
        LLT NewBVTy = LLT::fixed_vector(NumSrcsUsed, SrcTy);

        // Check if the resulting build_vector would be legal.
        LegalizeActionStep ActionStep =
            LI.getAction({TargetOpcode::G_BUILD_VECTOR, {NewBVTy, SrcTy}});
        if (ActionStep.Action != LegalizeActions::Legal)
          return CurrentBest;

        SmallVector<Register> NewSrcs;
        for (unsigned SrcIdx = StartSrcIdx; SrcIdx < StartSrcIdx + NumSrcsUsed;
             ++SrcIdx)
          NewSrcs.push_back(BV.getReg(SrcIdx));
        // ... (builder insertion-point setup elided)
        return MIB.buildBuildVector(NewBVTy, NewSrcs).getReg(0);
      }
      // A single source element provides exactly the requested bits.
      return BV.getReg(StartSrcIdx);
    }
    /// Given a G_INSERT op \p MI and a start bit and size, try to find
    /// the origin of the value defined by that start position and size.
    Register findValueFromInsert(MachineInstr &MI, unsigned StartBit,
                                 unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_INSERT);
      assert(Size > 0);

      Register ContainerSrcReg = MI.getOperand(1).getReg();
      Register InsertedReg = MI.getOperand(2).getReg();
      LLT InsertedRegTy = MRI.getType(InsertedReg);
      unsigned InsertOffset = MI.getOperand(3).getImm();

      // The requested range comes either entirely from the container source,
      // entirely from the inserted value, or straddles both (give up).
      unsigned InsertedEndBit = InsertOffset + InsertedRegTy.getSizeInBits();
      unsigned EndBit = StartBit + Size;
      unsigned NewStartBit;
      Register SrcRegToUse;
      if (EndBit <= InsertOffset || InsertedEndBit <= StartBit) {
        SrcRegToUse = ContainerSrcReg;
        NewStartBit = StartBit;
        return findValueFromDefImpl(SrcRegToUse, NewStartBit, Size);
      }
      if (InsertOffset <= StartBit && EndBit <= InsertedEndBit) {
        SrcRegToUse = InsertedReg;
        NewStartBit = StartBit - InsertOffset;
        if (NewStartBit == 0 &&
            Size == MRI.getType(SrcRegToUse).getSizeInBits())
          CurrentBest = SrcRegToUse;
        return findValueFromDefImpl(SrcRegToUse, NewStartBit, Size);
      }
      // The bit range spans both the container and the inserted value.
      return CurrentBest;
    }
    /// Given a G_SEXT, G_ZEXT or G_ANYEXT op \p MI and a start bit and size,
    /// try to find the origin of the value defined by that range.
    Register findValueFromExt(MachineInstr &MI, unsigned StartBit,
                              unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_SEXT ||
             MI.getOpcode() == TargetOpcode::G_ZEXT ||
             MI.getOpcode() == TargetOpcode::G_ANYEXT);
      assert(Size > 0);

      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      unsigned SrcSize = SrcType.getSizeInBits();

      // The requested bits must come from the extension's source, not from
      // the bits the extension (re)defines.
      if (StartBit + Size > SrcSize)
        return CurrentBest;

      if (StartBit == 0 && SrcType.getSizeInBits() == Size)
        CurrentBest = SrcReg;
      return findValueFromDefImpl(SrcReg, StartBit, Size);
    }
    /// Given a G_TRUNC op \p MI and a start bit and size, try to find
    /// the origin of the value defined by that range.
    Register findValueFromTrunc(MachineInstr &MI, unsigned StartBit,
                                unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_TRUNC);
      assert(Size > 0);

      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (SrcType.isVector())
        return CurrentBest;
      return findValueFromDefImpl(SrcReg, StartBit, Size);
    }
    /// Internal implementation for findValueFromDef(). findValueFromDef()
    /// returns an empty Register if no better value was found, whereas this
    /// returns the found value or \p DefReg itself.
    Register findValueFromDefImpl(Register DefReg, unsigned StartBit,
                                  unsigned Size) {
      std::optional<DefinitionAndSourceRegister> DefSrcReg =
          getDefSrcRegIgnoringCopies(DefReg, MRI);
      MachineInstr *Def = DefSrcReg->MI;
      DefReg = DefSrcReg->Reg;
      switch (Def->getOpcode()) {
      case TargetOpcode::G_CONCAT_VECTORS:
        return findValueFromConcat(cast<GConcatVectors>(*Def), StartBit, Size);
      case TargetOpcode::G_UNMERGE_VALUES: {
        // Compute the bit offset of DefReg within the unmerge source, then
        // continue the search from that source.
        unsigned DefStartBit = 0;
        unsigned DefSize = MRI.getType(DefReg).getSizeInBits();
        for (const auto &MO : Def->defs()) {
          if (MO.getReg() == DefReg)
            break;
          DefStartBit += DefSize;
        }
        Register SrcReg = Def->getOperand(Def->getNumOperands() - 1).getReg();
        Register SrcOriginReg =
            findValueFromDefImpl(SrcReg, StartBit + DefStartBit, Size);
        if (SrcOriginReg)
          return SrcOriginReg;
        // Failed to find a better value. If the range exactly covers DefReg,
        // returning DefReg itself is still useful.
        if (StartBit == 0 && Size == DefSize)
          return DefReg;
        return CurrentBest;
      }
      case TargetOpcode::G_BUILD_VECTOR:
        return findValueFromBuildVector(cast<GBuildVector>(*Def), StartBit,
                                        Size);
      case TargetOpcode::G_INSERT:
        return findValueFromInsert(*Def, StartBit, Size);
      case TargetOpcode::G_TRUNC:
        return findValueFromTrunc(*Def, StartBit, Size);
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT:
      case TargetOpcode::G_ANYEXT:
        return findValueFromExt(*Def, StartBit, Size);
      default:
        return CurrentBest;
      }
    }
  public:
    ArtifactValueFinder(MachineRegisterInfo &Mri, MachineIRBuilder &Builder,
                        const LegalizerInfo &Info)
        : MRI(Mri), MIB(Builder), LI(Info) {}

    /// Try to find a source of the value defined in the def \p DefReg,
    /// starting at position \p StartBit with size \p Size.
    /// \returns a register with the requested size, or an empty Register if
    /// no better value could be found.
    Register findValueFromDef(Register DefReg, unsigned StartBit,
                              unsigned Size) {
      Register FoundReg = findValueFromDefImpl(DefReg, StartBit, Size);
      return FoundReg != DefReg ? FoundReg : Register();
    }
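    // Illustrative query (a sketch; register names and types are invented):
    // given
    //   %v:_(<4 x s32>) = G_CONCAT_VECTORS %a:_(<2 x s32>), %b:_(<2 x s32>)
    // findValueFromDef(%v, 64, 64) resolves to %b, while a query such as
    // findValueFromDef(%v, 32, 64) gives up because the range straddles both
    // sources.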
    /// Try to combine the defs of an unmerge \p MI by attempting to find
    /// values that provide the bits for each def reg.
    /// \returns true if all the defs of the unmerge have been made dead.
    bool tryCombineUnmergeDefs(GUnmerge &MI, GISelChangeObserver &Observer,
                               SmallVectorImpl<Register> &UpdatedDefs) {
      unsigned NumDefs = MI.getNumDefs();
      LLT DestTy = MRI.getType(MI.getReg(0));

      SmallBitVector DeadDefs(NumDefs);
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        Register DefReg = MI.getReg(DefIdx);
        if (MRI.use_nodbg_empty(DefReg)) {
          DeadDefs[DefIdx] = true;
          continue;
        }
        Register FoundVal = findValueFromDef(DefReg, 0, DestTy.getSizeInBits());
        if (!FoundVal)
          continue;
        if (MRI.getType(FoundVal) != DestTy)
          continue;

        MRI.replaceRegWith(DefReg, FoundVal);
        // Restore this def operand; only the uses should change.
        MI.getOperand(DefIdx).setReg(DefReg);
        UpdatedDefs.push_back(FoundVal);
        DeadDefs[DefIdx] = true;
      }

      return DeadDefs.all();
    }
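    // Illustrative effect (a sketch; register names and types are invented):
    //   %a:_(s16), %b:_(s16) = G_UNMERGE_VALUES %x:_(s32)
    //   %y:_(s32) = G_MERGE_VALUES %a, %b
    //   %c:_(s16), %d:_(s16) = G_UNMERGE_VALUES %y
    // The defs %c and %d of the second unmerge can be rewired to reuse %a and
    // %b, after which that unmerge is dead.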
    /// Return the unmerge that (transitively) defines \p Reg with piece size
    /// \p Size, setting \p DefOperandIdx to the matching def index.
    GUnmerge *findUnmergeThatDefinesReg(Register Reg, unsigned Size,
                                        unsigned &DefOperandIdx) {
      if (Register Def = findValueFromDef(Reg, 0, Size)) {
        if (auto *Unmerge = dyn_cast<GUnmerge>(MRI.getVRegDef(Def))) {
          DefOperandIdx = Unmerge->findRegisterDefOperandIdx(Def);
          return Unmerge;
        }
      }
      return nullptr;
    }
    /// Check whether the sources of merge-like \p MI starting at
    /// \p MergeStartIdx form the sequence of defs of \p Unmerge starting at
    /// \p UnmergeIdxStart. With \p AllowUndef, G_IMPLICIT_DEF sources may
    /// stand in for any def.
    bool isSequenceFromUnmerge(GMergeLikeInstr &MI, unsigned MergeStartIdx,
                               GUnmerge *Unmerge, unsigned UnmergeIdxStart,
                               unsigned NumElts, unsigned EltSize,
                               bool AllowUndef) {
      assert(MergeStartIdx + NumElts <= MI.getNumSources());
      for (unsigned i = MergeStartIdx; i < MergeStartIdx + NumElts; ++i) {
        unsigned EltUnmergeIdx;
        GUnmerge *EltUnmerge = findUnmergeThatDefinesReg(
            MI.getSourceReg(i), EltSize, EltUnmergeIdx);
        // Check if source i comes from the same Unmerge.
        if (EltUnmerge == Unmerge) {
          // Check that source i's def has the same index in the sequence.
          if (i - MergeStartIdx != EltUnmergeIdx - UnmergeIdxStart)
            return false;
        } else if (!AllowUndef ||
                   MRI.getVRegDef(MI.getSourceReg(i))->getOpcode() !=
                       TargetOpcode::G_IMPLICIT_DEF)
          return false;
      }
      return true;
    }
    /// Try to fold a merge-like instruction whose sources all come from
    /// unmerges back into a copy, a narrower unmerge, or a merge of the
    /// original unmerge sources.
    bool tryCombineMergeLike(GMergeLikeInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             SmallVectorImpl<Register> &UpdatedDefs,
                             GISelChangeObserver &Observer) {
      Register Elt0 = MI.getSourceReg(0);
      LLT EltTy = MRI.getType(Elt0);
      unsigned EltSize = EltTy.getSizeInBits();

      unsigned Elt0UnmergeIdx;
      // Search for an unmerge that is a candidate for the combine.
      auto *Unmerge = findUnmergeThatDefinesReg(Elt0, EltSize, Elt0UnmergeIdx);
      if (!Unmerge)
        return false;

      unsigned NumMIElts = MI.getNumSources();
      Register Dst = MI.getReg(0);
      LLT DstTy = MRI.getType(Dst);
      Register UnmergeSrc = Unmerge->getSourceReg();
      LLT UnmergeSrcTy = MRI.getType(UnmergeSrc);

      // Case 1: MI reassembles UnmergeSrc exactly; replace Dst with it.
      if ((DstTy == UnmergeSrcTy) && (Elt0UnmergeIdx == 0)) {
        if (!isSequenceFromUnmerge(MI, 0, Unmerge, 0, NumMIElts, EltSize,
                                   /*AllowUndef=*/DstTy.isVector()))
          return false;
        replaceRegOrBuildCopy(Dst, UnmergeSrc, MRI, MIB, UpdatedDefs, Observer);
        DeadInsts.push_back(&MI);
        return true;
      }

      // Case 2: UnmergeSrc can be unmerged into DstTy pieces directly, and
      // Dst is one of those pieces.
      if ((DstTy.isVector() == UnmergeSrcTy.isVector()) &&
          (Elt0UnmergeIdx % NumMIElts == 0) &&
          getCoverTy(UnmergeSrcTy, DstTy) == UnmergeSrcTy) {
        if (!isSequenceFromUnmerge(MI, 0, Unmerge, Elt0UnmergeIdx, NumMIElts,
                                   EltSize, false))
          return false;
        MIB.setInstrAndDebugLoc(MI);
        auto NewUnmerge = MIB.buildUnmerge(DstTy, Unmerge->getSourceReg());
        unsigned DstIdx = (Elt0UnmergeIdx * EltSize) / DstTy.getSizeInBits();
        replaceRegOrBuildCopy(Dst, NewUnmerge.getReg(DstIdx), MRI, MIB,
                              UpdatedDefs, Observer);
        DeadInsts.push_back(&MI);
        return true;
      }

      // Case 3: several whole unmerge sources concatenate into Dst; merge
      // those sources directly.
      if ((DstTy.isVector() == UnmergeSrcTy.isVector()) &&
          getCoverTy(DstTy, UnmergeSrcTy) == DstTy) {
        SmallVector<Register, 4> ConcatSources;
        unsigned NumElts = Unmerge->getNumDefs();
        for (unsigned i = 0; i < MI.getNumSources(); i += NumElts) {
          unsigned EltUnmergeIdx;
          auto *UnmergeI = findUnmergeThatDefinesReg(MI.getSourceReg(i),
                                                     EltSize, EltUnmergeIdx);
          // All the unmerges have to match in shape and be used from index 0.
          if ((!UnmergeI) || (UnmergeI->getNumDefs() != NumElts) ||
              (EltUnmergeIdx != 0))
            return false;
          if (!isSequenceFromUnmerge(MI, i, UnmergeI, 0, NumElts, EltSize,
                                     false))
            return false;
          ConcatSources.push_back(UnmergeI->getSourceReg());
        }

        MIB.setInstrAndDebugLoc(MI);
        MIB.buildMergeLikeInstr(Dst, ConcatSources);
        DeadInsts.push_back(&MI);
        return true;
      }

      return false;
    }
  };
  bool tryCombineUnmergeValues(GUnmerge &MI,
                               SmallVectorImpl<MachineInstr *> &DeadInsts,
                               SmallVectorImpl<Register> &UpdatedDefs,
                               GISelChangeObserver &Observer) {
    unsigned NumDefs = MI.getNumDefs();
    Register SrcReg = MI.getSourceReg();
    MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
    if (!SrcDef)
      return false;

    LLT OpTy = MRI.getType(SrcReg);
    LLT DestTy = MRI.getType(MI.getReg(0));
    unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);

    Builder.setInstrAndDebugLoc(MI);

    ArtifactValueFinder Finder(MRI, Builder, LI);
    if (Finder.tryCombineUnmergeDefs(MI, Observer, UpdatedDefs)) {
      markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
      return true;
    }

    if (auto *SrcUnmerge = dyn_cast<GUnmerge>(SrcDef)) {
      // unmerge(unmerge): unmerge the outer source directly when the target
      // can handle (or narrow) the wider unmerge.
      Register SrcUnmergeSrc = SrcUnmerge->getSourceReg();
      LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);

      LegalizeActionStep ActionStep = LI.getAction(
          {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
      switch (ActionStep.Action) {
      case LegalizeActions::Lower:
      case LegalizeActions::Unsupported:
        break;
      case LegalizeActions::FewerElements:
      case LegalizeActions::NarrowScalar:
        if (ActionStep.TypeIdx == 1)
          return false;
        break;
      default:
        return false;
      }

      auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);

      for (unsigned I = 0; I != NumDefs; ++I) {
        Register Def = MI.getReg(I);
        replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
                              MRI, Builder, UpdatedDefs, Observer);
      }

      markInstAndDefDead(MI, *SrcUnmerge, DeadInsts, SrcDefIdx);
      return true;
    }

    MachineInstr *MergeI = SrcDef;
    unsigned ConvertOp = 0;

    // Handle intermediate conversions.
    unsigned SrcOp = SrcDef->getOpcode();
    if (isArtifactCast(SrcOp)) {
      ConvertOp = SrcOp;
      MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
    }

    if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(),
                                       ConvertOp, OpTy, DestTy)) {
      // We might still have a chance by trying to combine unmerge(cast).
      return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
    }

    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;

    if (NumMergeRegs < NumDefs) {
      if (NumDefs % NumMergeRegs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform unmerge(merge) into one smaller unmerge per merge operand.
      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
        SmallVector<Register, 8> DstRegs;
        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
             ++j, ++DefIdx)
          DstRegs.push_back(MI.getReg(DefIdx));

        if (ConvertOp) {
          // A vector is being split and casted: do the conversion on the
          // smaller pieces and unmerge those.
          LLT MergeDstTy = MRI.getType(SrcDef->getOperand(0).getReg());
          LLT MergeEltTy = MergeDstTy.divide(NumMergeRegs);

          Register TmpReg = MRI.createGenericVirtualRegister(MergeEltTy);
          Builder.buildInstr(ConvertOp, {TmpReg},
                             {MergeI->getOperand(Idx + 1).getReg()});
          Builder.buildUnmerge(DstRegs, TmpReg);
        } else {
          Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
        }
        UpdatedDefs.append(DstRegs.begin(), DstRegs.end());
      }

    } else if (NumMergeRegs > NumDefs) {
      if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform unmerge(merge) into one smaller merge per unmerge def.
      const unsigned NumRegs = NumMergeRegs / NumDefs;
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        SmallVector<Register, 8> Regs;
        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
             ++j, ++Idx)
          Regs.push_back(MergeI->getOperand(Idx).getReg());

        Register DefReg = MI.getReg(DefIdx);
        Builder.buildMergeLikeInstr(DefReg, Regs);
        UpdatedDefs.push_back(DefReg);
      }

    } else {
      LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

      if (!ConvertOp && DestTy != MergeSrcTy)
        ConvertOp = TargetOpcode::G_BITCAST;

      if (ConvertOp) {
        Builder.setInstr(MI);

        for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
          Register DefReg = MI.getOperand(Idx).getReg();
          Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
          if (!MRI.use_empty(DefReg)) {
            Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
            UpdatedDefs.push_back(DefReg);
          }
        }

        markInstAndDefDead(MI, *MergeI, DeadInsts);
        return true;
      }

      assert(DestTy == MergeSrcTy &&
             "Bitcast and the other kinds of conversions should "
             "have happened earlier");

      for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
        Register DstReg = MI.getOperand(Idx).getReg();
        Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
        replaceRegOrBuildCopy(DstReg, MergeSrc, MRI, Builder, UpdatedDefs,
                              Observer);
      }
    }

    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }
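  // Illustrative MIR for the unmerge(merge) fold above (a sketch; register
  // names and types are invented):
  //   %m:_(s64) = G_MERGE_VALUES %a:_(s32), %b:_(s32)
  //   %x:_(s16), %y:_(s16), %z:_(s16), %w:_(s16) = G_UNMERGE_VALUES %m
  // becomes two narrower unmerges of the original sources:
  //   %x:_(s16), %y:_(s16) = G_UNMERGE_VALUES %a
  //   %z:_(s16), %w:_(s16) = G_UNMERGE_VALUES %b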
  bool tryCombineExtract(MachineInstr &MI,
                         SmallVectorImpl<MachineInstr *> &DeadInsts,
                         SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);

    // Try to use the source registers from a G_MERGE_VALUES, as long as the
    // extracted range is fully contained in a single merge input.
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
    if (!MergeI || !isa<GMergeLikeInstr>(MergeI))
      return false;

    Register DstReg = MI.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);
    LLT SrcTy = MRI.getType(SrcReg);

    // TODO: Do we need to check if the resulting extract is supported?
    unsigned ExtractDstSize = DstTy.getSizeInBits();
    unsigned Offset = MI.getOperand(2).getImm();
    unsigned NumMergeSrcs = MergeI->getNumOperands() - 1;
    unsigned MergeSrcSize = SrcTy.getSizeInBits() / NumMergeSrcs;
    unsigned MergeSrcIdx = Offset / MergeSrcSize;

    // Compute the index of the merge input that provides the last needed bit.
    unsigned EndMergeSrcIdx = (Offset + ExtractDstSize - 1) / MergeSrcSize;

    // Can't handle the case where the extract spans multiple inputs.
    if (MergeSrcIdx != EndMergeSrcIdx)
      return false;

    Builder.setInstr(MI);
    Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
                         Offset - MergeSrcIdx * MergeSrcSize);
    UpdatedDefs.push_back(DstReg);
    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }
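  // Illustrative MIR for the extract(merge) fold above (a sketch; register
  // names, types, and offsets are invented):
  //   %m:_(s64) = G_MERGE_VALUES %a:_(s32), %b:_(s32)
  //   %e:_(s16) = G_EXTRACT %m, 40
  // becomes %e:_(s16) = G_EXTRACT %b, 8, since bits [40, 56) lie entirely
  // inside the second merge input.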
  /// Try to combine away MI. Returns true if it did. Adds instructions that
  /// become dead as a result (possibly including MI itself) to DeadInsts.
  bool tryCombineInstruction(MachineInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    ArtifactValueFinder Finder(MRI, Builder, LI);

    // This might be a recursive call with DeadInsts already populated;
    // process them now to avoid re-visiting dead instructions.
    if (!DeadInsts.empty())
      deleteMarkedDeadInsts(DeadInsts, WrapperObserver);

    // Registers redefined below whose users may have become combinable.
    SmallVector<Register, 4> UpdatedDefs;
    bool Changed = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case TargetOpcode::G_ANYEXT:
      Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_ZEXT:
      Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_SEXT:
      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_UNMERGE_VALUES:
      Changed = tryCombineUnmergeValues(cast<GUnmerge>(MI), DeadInsts,
                                        UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_CONCAT_VECTORS:
      // If any user of this merge-like def is an unmerge or trunc, re-queue
      // the def so a later walk can try to fold through it.
      for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
        if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
            U.getOpcode() == TargetOpcode::G_TRUNC) {
          UpdatedDefs.push_back(MI.getOperand(0).getReg());
          break;
        }
      }
      Changed = Finder.tryCombineMergeLike(cast<GMergeLikeInstr>(MI), DeadInsts,
                                           UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_EXTRACT:
      Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_TRUNC:
      Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    }

    // Follow the def-use chains of every updated def and notify the observer
    // about artifact users so they get re-added to the combine worklist.
    while (!UpdatedDefs.empty()) {
      Register NewDef = UpdatedDefs.pop_back_val();
      for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
        switch (Use.getOpcode()) {
        // Keep this list in sync with the list of all artifact combines.
        case TargetOpcode::G_ANYEXT:
        case TargetOpcode::G_ZEXT:
        case TargetOpcode::G_SEXT:
        case TargetOpcode::G_UNMERGE_VALUES:
        case TargetOpcode::G_EXTRACT:
        case TargetOpcode::G_TRUNC:
        case TargetOpcode::G_BUILD_VECTOR:
          // Adding Use to ArtifactList.
          WrapperObserver.changedInstr(Use);
          break;
        case TargetOpcode::COPY: {
          Register Copy = Use.getOperand(0).getReg();
          if (Copy.isVirtual())
            UpdatedDefs.push_back(Copy);
          break;
        }
        default:
          break;
        }
      }
    }
    return Changed;
  }
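  // Usage sketch (an assumption about the calling convention, not part of
  // this header): a driver such as the Legalizer pass constructs the combiner
  // and feeds it artifacts off its worklist, roughly
  //   LegalizationArtifactCombiner ArtCombiner(Builder, MF.getRegInfo(), LI);
  //   SmallVector<MachineInstr *, 4> DeadInsts;
  //   ArtCombiner.tryCombineInstruction(*ArtifactMI, DeadInsts, Observer);
  // where Observer is the pass's GISelObserverWrapper; the exact worklist
  // management lives in the pass, and the names above are illustrative.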
private:
  static Register getArtifactSrcReg(const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case TargetOpcode::COPY:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_EXTRACT:
      return MI.getOperand(1).getReg();
    case TargetOpcode::G_UNMERGE_VALUES:
      return MI.getOperand(MI.getNumOperands() - 1).getReg();
    default:
      llvm_unreachable("Not a legalization artifact");
    }
  }
  /// Mark a def of one of MI's original operands, DefMI, as dead if changing
  /// MI (by killing it or changing operands) results in DefMI being dead too.
  /// In-between COPYs or artifact casts are also collected if they are dead.
  /// MI itself is not marked dead.
  void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
                   SmallVectorImpl<MachineInstr *> &DeadInsts,
                   unsigned DefIdx = 0) {
    // Walk the chain of single-use COPYs / artifact casts between MI and
    // DefMI and collect the ones that die together with MI.
    MachineInstr *PrevMI = &MI;
    while (PrevMI != &DefMI) {
      Register PrevRegSrc = getArtifactSrcReg(*PrevMI);

      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
      if (MRI.hasOneUse(PrevRegSrc)) {
        if (TmpDef != &DefMI) {
          assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
                  isArtifactCast(TmpDef->getOpcode())) &&
                 "Expecting copy or artifact cast here");
          DeadInsts.push_back(TmpDef);
        }
      } else
        break;
      PrevMI = TmpDef;
    }

    if (PrevMI == &DefMI) {
      // DefMI itself is dead only if its other defs are unused and the def
      // feeding this chain has a single use (the chain we just collected).
      unsigned I = 0;
      bool IsDead = true;
      for (MachineOperand &Def : DefMI.defs()) {
        if (I != DefIdx) {
          if (!MRI.use_empty(Def.getReg())) {
            IsDead = false;
            break;
          }
        } else if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg())) {
          IsDead = false;
          break;
        }
        ++I;
      }
      if (IsDead)
        DeadInsts.push_back(&DefMI);
    }
  }
  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
  /// dead due to MI being killed, mark DefMI (and dead in-between copies) too.
  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          unsigned DefIdx = 0) {
    DeadInsts.push_back(&MI);
    markDefDead(MI, DefMI, DeadInsts, DefIdx);
  }

  /// Erase the collected dead instructions, notifying the observer first.
  void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    for (auto *DeadMI : DeadInsts) {
      WrapperObserver.erasingInstr(*DeadMI);
      DeadMI->eraseFromParent();
    }
    DeadInsts.clear();
  }
  /// Checks if the target legalizer info has specified anything about the
  /// instruction, or if it is unsupported.
  bool isInstUnsupported(const LegalityQuery &Query) const {
    using namespace LegalizeActions;
    LegalizeActionStep Step = LI.getAction(Query);
    return Step.Action == Unsupported || Step.Action == NotFound;
  }

  bool isInstLegal(const LegalityQuery &Query) const {
    return LI.getAction(Query).Action == LegalizeActions::Legal;
  }

  bool isConstantUnsupported(LLT Ty) const {
    if (!Ty.isVector())
      return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});

    LLT EltTy = Ty.getElementType();
    return isInstUnsupported({TargetOpcode::G_CONSTANT, {EltTy}}) ||
           isInstUnsupported({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}});
  }

  /// Looks through copy instructions and returns the actual source register.
  Register lookThroughCopyInstrs(Register Reg) {
    using namespace llvm::MIPatternMatch;

    Register TmpReg;
    while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
      if (MRI.getType(TmpReg).isValid())
        Reg = TmpReg;
      else
        break;
    }
    return Reg;
  }
};

} // namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H