using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
unsigned Opc = I->getOpcode();
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
case Instruction::And:
case Instruction::Xor:
case Instruction::AShr:
case Instruction::LShr:
case Instruction::Shl:
case Instruction::UDiv:
case Instruction::URem: {
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
if (I->getOperand(0)->getType() == Ty)
  return I->getOperand(0);
Opc == Instruction::SExt);
case Instruction::Select: {
case Instruction::PHI: {
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::Call:
switch (II->getIntrinsicID()) {
case Intrinsic::vscale: {
case Instruction::ShuffleVector: {
auto *ScalarTy = cast<VectorType>(Ty)->getElementType();
auto *VTy = cast<VectorType>(I->getOperand(0)->getType());
cast<ShuffleVectorInst>(I)->getShuffleMask());
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
DstTy, SrcIntPtrTy, MidIntPtrTy,
if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
    (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
if (auto *SrcC = dyn_cast<Constant>(Src))
if (auto *CSrc = dyn_cast<CastInst>(Src)) {
if (CSrc->hasOneUse())
if (auto *Sel = dyn_cast<SelectInst>(Src)) {
auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
if (CI.getOpcode() != Instruction::BitCast ||
if (auto *PN = dyn_cast<PHINode>(Src)) {
auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
auto *DestTy = dyn_cast<FixedVectorType>(Ty);
if (SrcTy && DestTy &&
    SrcTy->getNumElements() == DestTy->getNumElements() &&
if (isa<Constant>(V))
if (!isa<Instruction>(V))
auto *I = cast<Instruction>(V);
Type *OrigTy = V->getType();
switch (I->getOpcode()) {
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::UDiv:
case Instruction::URem: {
case Instruction::Shl: {
case Instruction::LShr: {
case Instruction::AShr: {
unsigned ShiftedBits = OrigBitWidth - BitWidth;
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::Select: {
case Instruction::PHI: {
case Instruction::FPToUI:
case Instruction::FPToSI: {
Type *InputTy = I->getOperand(0)->getType()->getScalarType();
I->getOpcode() == Instruction::FPToSI);
case Instruction::ShuffleVector:
if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
Value *VecInput = nullptr;
!isa<VectorType>(VecInput->getType()))
unsigned VecWidth = VecType->getPrimitiveSizeInBits();
unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;
if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
unsigned NumVecElts = VecWidth / DestWidth;
if (VecType->getElementType() != DestType) {
unsigned Elt = ShiftAmount / DestWidth;
Elt = NumVecElts - 1 - Elt;
"Don't narrow to an illegal scalar type");
Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
if (Or0->getOpcode() == BinaryOperator::LShr) {
Or1->getOpcode() == BinaryOperator::LShr &&
"Illegal or(shift,shift) pair");
unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
if (ShVal0 != ShVal1)
unsigned Mask = Width - 1;
Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
if (ShVal0 != ShVal1)
Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul: {
case Instruction::LShr:
case Instruction::AShr: {
unsigned MaxShiftAmt = SrcWidth - DestWidth;
APInt(SrcWidth, MaxShiftAmt)))) {
auto *OldShift = cast<Instruction>(Trunc.getOperand(0));
bool IsExact = OldShift->isExact();
OldShift->getOpcode() == Instruction::AShr
if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
    Shuf->getType() == Shuf->getOperand(0)->getType()) {
assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
       "Unexpected instruction for shrinking");
auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
if (!InsElt || !InsElt->hasOneUse())
Value *VecOp = InsElt->getOperand(0);
Value *ScalarOp = InsElt->getOperand(1);
Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
if (DestWidth * 2 < SrcWidth) {
auto *NewDestTy = DestITy->getExtendedType();
if (shouldChangeType(SrcTy, NewDestTy) &&
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
           " to reduce the width of operand of"
if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
if (DestWidth == 1) {
unsigned AWidth = A->getType()->getScalarSizeInBits();
unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
auto *OldSh = cast<Instruction>(Src);
bool IsExact = OldSh->isExact();
APInt(SrcWidth, MaxShiftAmt)))) {
auto GetNewShAmt = [&](unsigned Width) {
Constant *MaxAmt = ConstantInt::get(SrcTy, Width - 1, false);
if (A->getType() == DestTy) {
Constant *ShAmt = GetNewShAmt(DestWidth);
return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
               : BinaryOperator::CreateAShr(A, ShAmt);
if (Src->hasOneUse()) {
Constant *ShAmt = GetNewShAmt(AWidth);
if (Src->hasOneUse() &&
    (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
auto *VecOpTy = cast<VectorType>(VecOp->getType());
auto VecElts = VecOpTy->getElementCount();
if (SrcWidth % DestWidth == 0) {
uint64_t TruncRatio = SrcWidth / DestWidth;
uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
: VecOpIdx * TruncRatio;
assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
unsigned AWidth = A->getType()->getScalarSizeInBits();
if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
if (Log2_32(*MaxVScale) < DestWidth) {
bool Changed = false;
return Changed ? &Trunc : nullptr;
Value *In = Cmp->getOperand(0);
Value *Sh = ConstantInt::get(In->getType(),
                             In->getType()->getScalarSizeInBits() - 1);
if (In->getType() != Zext.getType())
if (Op1CV->isZero() && Cmp->isEquality()) {
uint32_t ShAmt = KnownZeroMask.logBase2();
bool IsExpectShAmt = KnownZeroMask.isPowerOf2() &&
(Cmp->getOperand(0)->getType() == Zext.getType() ||
Value *In = Cmp->getOperand(0);
In->getName() + ".lobit");
if (Cmp->isEquality()) {
auto *And = cast<BinaryOperator>(Cmp->getOperand(0));
Value *Shift = And->getOperand(X == And->getOperand(0) ? 1 : 0);
auto *I = cast<Instruction>(V);
switch (I->getOpcode()) {
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::Trunc:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
if (BitsToClear == 0 && Tmp == 0)
if (Tmp == 0 && I->isBitwiseLogicOp()) {
unsigned VSize = V->getType()->getScalarSizeInBits();
if (I->getOpcode() == Instruction::And)
case Instruction::Shl: {
BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
case Instruction::LShr: {
if (BitsToClear > V->getType()->getScalarSizeInBits())
  BitsToClear = V->getType()->getScalarSizeInBits();
case Instruction::Select:
case Instruction::PHI: {
case Instruction::Call:
if (II->getIntrinsicID() == Intrinsic::vscale)
Type *SrcTy = Src->getType(), *DestTy = Zext.getType();
unsigned BitsToClear;
if (shouldChangeType(SrcTy, DestTy) &&
"Can't clear more bits than in SrcTy");
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
           " to avoid zero extend: "
if (auto *SrcOp = dyn_cast<Instruction>(Src))
  if (SrcOp->hasOneUse())
DestBitSize - SrcBitsKept),
return BinaryOperator::CreateAnd(Res, C);
if (auto *CSrc = dyn_cast<TruncInst>(Src)) {
Value *A = CSrc->getOperand(0);
unsigned SrcSize = A->getType()->getScalarSizeInBits();
unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
if (SrcSize < DstSize) {
Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
if (SrcSize == DstSize) {
return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
if (SrcSize > DstSize) {
return BinaryOperator::CreateAnd(Trunc,
                                 ConstantInt::get(Trunc->getType(),
if (auto *Cmp = dyn_cast<ICmpInst>(Src))
  return transformZExtICmp(Cmp, Zext);
X->getType() == DestTy)
X->getType() == DestTy) {
X->getType() == DestTy) {
return BinaryOperator::CreateAnd(X, ZextC);
unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
if (Log2_32(*MaxVScale) < TypeWidth) {
Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
if (!Op1->getType()->isIntOrIntVectorTy())
if (In->getType() != Sext.getType())
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (Cmp->hasOneUse() &&
    Cmp->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())) {
if (KnownZeroMask.isPowerOf2()) {
Value *In = Cmp->getOperand(0);
if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
unsigned ShiftAmt = KnownZeroMask.countr_zero();
ConstantInt::get(In->getType(), ShiftAmt));
unsigned ShiftAmt = KnownZeroMask.countl_zero();
ConstantInt::get(In->getType(), ShiftAmt));
KnownZeroMask.getBitWidth() - 1), "sext");
1389 "Can't sign extend type to a smaller type");
1395 auto *
I = cast<Instruction>(V);
1396 switch (
I->getOpcode()) {
1397 case Instruction::SExt:
1398 case Instruction::ZExt:
1399 case Instruction::Trunc:
1401 case Instruction::And:
1402 case Instruction::Or:
1403 case Instruction::Xor:
1404 case Instruction::Add:
1405 case Instruction::Sub:
1406 case Instruction::Mul:
1414 case Instruction::Select:
1418 case Instruction::PHI: {
Type *SrcTy = Src->getType(), *DestTy = Sext.getType();
CI->setNonNeg(true);
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
           " to avoid sign extend: "
Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
unsigned XBitSize = X->getType()->getScalarSizeInBits();
if (Src->hasOneUse() && X->getType() == DestTy) {
Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
if (Src->hasOneUse() &&
if (auto *Cmp = dyn_cast<ICmpInst>(Src))
  return transformSExtICmp(Cmp, Sext);
Constant *BA = nullptr, *CA = nullptr;
assert(WideCurrShAmt &&
       "Constant folding of ImmConstant cannot fail");
return BinaryOperator::CreateAShr(A, NewShAmt);
Type *XTy = X->getType();
Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
if (cast<BinaryOperator>(Src)->getOperand(0)->hasOneUse()) {
if (Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
auto *CV = dyn_cast<Constant>(V);
auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
Type *MinType = nullptr;
unsigned NumElts = CVVTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
if (isa<UndefValue>(CV->getAggregateElement(i)))
auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
if (auto *FPExt = dyn_cast<FPExtInst>(V))
  return FPExt->getOperand(0)->getType();
if (auto *CFP = dyn_cast<ConstantFP>(V))
if (auto *FPCExt = dyn_cast<ConstantExpr>(V))
  if (FPCExt->getOpcode() == Instruction::FPExt)
    return FPCExt->getOperand(0)->getType();
return V->getType();
assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
Value *Src = I.getOperand(0);
Type *SrcTy = Src->getType();
Type *FPTy = I.getType();
bool IsSigned = Opcode == Instruction::SIToFP;
if (SrcSize <= DestNumSigBits)
int SrcNumSigBits = F->getType()->getFPMantissaWidth();
if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
    SrcNumSigBits <= DestNumSigBits)
if (SigBits <= DestNumSigBits)
auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
if (BO && BO->hasOneUse()) {
unsigned OpWidth = BO->getType()->getFPMantissaWidth();
unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
switch (BO->getOpcode()) {
case Instruction::FAdd:
case Instruction::FSub:
if (OpWidth >= 2 * DstWidth + 1 && DstWidth >= SrcWidth) {
case Instruction::FMul:
if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
case Instruction::FDiv:
if (OpWidth >= 2 * DstWidth && DstWidth >= SrcWidth) {
case Instruction::FRem: {
if (SrcWidth == OpWidth)
if (LHSWidth == SrcWidth) {
if (Op && Op->hasOneUse()) {
if (isa<FPMathOperator>(Op))
X->getType() == Ty) {
X->getType() == Ty) {
if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
switch (II->getIntrinsicID()) {
case Intrinsic::ceil:
case Intrinsic::fabs:
case Intrinsic::floor:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::trunc: {
Value *Src = II->getArgOperand(0);
if (!Src->hasOneUse())
if (II->getIntrinsicID() != Intrinsic::fabs) {
FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
II->getIntrinsicID(), Ty);
II->getOperandBundlesAsDefs(OpBundles);
if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
auto *FPCast = cast<CastInst>(Src);
if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
auto *FPCast = cast<CastInst>(Src);
auto *OpI = cast<CastInst>(FI.getOperand(0));
Value *X = OpI->getOperand(0);
Type *XType = X->getType();
bool IsOutputSigned = isa<FPToSIInst>(FI);
if (OutputSize > OpI->getType()->getFPMantissaWidth())
bool IsInputSigned = isa<SIToFPInst>(OpI);
if (IsInputSigned && IsOutputSigned)
assert(XType == DestType && "Unexpected types for int to FP to int casts");
UI->setNonNeg(true);
if (TySize != PtrSize) {
Mask->getType() == Ty)
if (auto *GEP = dyn_cast<GEPOperator>(SrcOp)) {
if (GEP->hasOneUse() &&
    isa<ConstantPointerNull>(GEP->getPointerOperand())) {
if (GEP->hasOneUse() &&
    Base->getType() == Ty) {
auto *NewOp = BinaryOperator::CreateAdd(Base, Offset);
if (GEP->hasNoUnsignedWrap() ||
    (GEP->hasNoUnsignedSignedWrap() &&
NewOp->setHasNoUnsignedWrap(true);
if (SrcTy->getElementType() != DestTy->getElementType()) {
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
    DestTy->getElementType()->getPrimitiveSizeInBits())
cast<FixedVectorType>(SrcTy)->getNumElements());
unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();
assert(SrcElts != DestElts && "Element counts should be different.");
auto ShuffleMaskStorage = llvm::to_vector<16>(llvm::seq<int>(0, SrcElts));
if (SrcElts > DestElts) {
ShuffleMask = ShuffleMaskStorage;
ShuffleMask = ShuffleMask.take_back(DestElts);
ShuffleMask = ShuffleMask.take_front(DestElts);
unsigned DeltaElts = DestElts - SrcElts;
ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
ShuffleMaskStorage.append(DeltaElts, NullElt);
ShuffleMask = ShuffleMaskStorage;
"Shift should be a multiple of the element type size");
if (isa<UndefValue>(V)) return true;
if (V->getType() == VecEltTy) {
if (Constant *C = dyn_cast<Constant>(V))
  if (C->isNullValue())
ElementIndex = Elements.size() - ElementIndex - 1;
if (Elements[ElementIndex])
Elements[ElementIndex] = V;
if (Constant *C = dyn_cast<Constant>(V)) {
if (!isa<IntegerType>(C->getType()))
C->getType()->getPrimitiveSizeInBits()));
for (unsigned i = 0; i != NumElts; ++i) {
unsigned ShiftI = i * ElementSize;
Instruction::LShr, C, ConstantInt::get(C->getType(), ShiftI));
if (!V->hasOneUse()) return false;
if (!I) return false;
switch (I->getOpcode()) {
default: return false;
case Instruction::BitCast:
if (I->getOperand(0)->getType()->isVectorTy())
case Instruction::ZExt:
I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
case Instruction::Or:
case Instruction::Shl: {
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
if (!CI) return false;
auto *DestVecTy = cast<FixedVectorType>(CI.getType());
DestVecTy->getElementType(),
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
if (!Elements[i]) continue;
auto *FixedVType = dyn_cast<FixedVectorType>(VecType);
if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
if (X->getType()->isFPOrFPVectorTy() &&
    Y->getType()->isIntOrIntVectorTy()) {
if (X->getType()->isIntOrIntVectorTy() &&
    Y->getType()->isFPOrFPVectorTy()) {
X->getType() == DestTy && !isa<Constant>(X)) {
X->getType() == DestTy && !isa<Constant>(X)) {
if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
CondVTy->getElementCount() !=
    cast<VectorType>(DestTy)->getElementCount())
auto *Sel = cast<Instruction>(BitCast.getOperand(0));
!isa<Constant>(X)) {
!isa<Constant>(X)) {
if (!isa<StoreInst>(U))
Type *SrcTy = Src->getType();
while (!PhiWorklist.empty()) {
for (Value *IncValue : OldPN->incoming_values()) {
if (isa<Constant>(IncValue))
if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
if (Addr == &CI || isa<LoadInst>(Addr))
if (LI->hasOneUse() && LI->isSimple())
if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
  if (OldPhiNodes.insert(PNode))
auto *BCI = dyn_cast<BitCastInst>(IncValue);
Type *TyA = BCI->getOperand(0)->getType();
Type *TyB = BCI->getType();
if (TyA != DestTy || TyB != SrcTy)
for (auto *OldPN : OldPhiNodes) {
if (auto *SI = dyn_cast<StoreInst>(V)) {
  if (!SI->isSimple() || SI->getOperand(0) != OldPN)
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
Type *TyB = BCI->getOperand(0)->getType();
Type *TyA = BCI->getType();
if (TyA != DestTy || TyB != SrcTy)
} else if (auto *PHI = dyn_cast<PHINode>(V)) {
if (!OldPhiNodes.contains(PHI))
for (auto *OldPN : OldPhiNodes) {
NewPNodes[OldPN] = NewPN;
for (auto *OldPN : OldPhiNodes) {
PHINode *NewPN = NewPNodes[OldPN];
for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
Value *V = OldPN->getOperand(j);
Value *NewV = nullptr;
if (auto *C = dyn_cast<Constant>(V)) {
} else if (auto *LI = dyn_cast<LoadInst>(V)) {
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
  NewV = BCI->getOperand(0);
} else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
  NewV = NewPNodes[PrevPN];
NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
for (auto *OldPN : OldPhiNodes) {
PHINode *NewPN = NewPNodes[OldPN];
if (auto *SI = dyn_cast<StoreInst>(V)) {
assert(SI->isSimple() && SI->getOperand(0) == OldPN);
SI->setOperand(0, NewBC);
else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
Type *TyB = BCI->getOperand(0)->getType();
Type *TyA = BCI->getType();
assert(TyA == DestTy && TyB == SrcTy);
} else if (auto *PHI = dyn_cast<PHINode>(V)) {
if (X->getType() != FTy)
Type *SrcTy = Src->getType();
if (DestTy == Src->getType())
if (isa<FixedVectorType>(DestTy)) {
if (isa<IntegerType>(SrcTy)) {
if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
CastInst *SrcCast = cast<CastInst>(Src);
if (isa<VectorType>(BCIn->getOperand(0)->getType()))
BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
if (SrcVTy->getNumElements() == 1) {
if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
  return new BitCastInst(InsElt->getOperand(1), DestTy);
Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
IndexC = SrcVTy->getNumElements() - 1 - IndexC;
unsigned EltWidth = Y->getType()->getScalarSizeInBits();
return BinaryOperator::CreateOr(AndX, ZextY);
if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
Value *ShufOp0 = Shuf->getOperand(0);
Value *ShufOp1 = Shuf->getOperand(1);
auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
    cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
    ShufElts == SrcVecElts) {
if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
if (DestTy->isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
    Shuf->hasOneUse() && Shuf->isReverse()) {
unsigned IntrinsicNum = 0;
IntrinsicNum = Intrinsic::bswap;
IntrinsicNum = Intrinsic::bitreverse;
if (IntrinsicNum != 0) {
assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
if (PHINode *PN = dyn_cast<PHINode>(Src))
static bool collectInsertionElements(Value *V, unsigned Shift, SmallVectorImpl< Value * > &Elements, Type *VecEltTy, bool isBigEndian)
V is a value which is inserted into a vector of VecEltTy.
static bool canEvaluateSExtd(Value *V, Type *Ty)
Return true if we can take the specified value and return it as type Ty without inserting any new casts and without changing the value of the common low bits.
static bool hasStoreUsersOnly(CastInst &CI)
Check if all users of CI are StoreInsts.
static Value * foldCopySignIdioms(BitCastInst &CI, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Fold (bitcast (or (and (bitcast X to int), signmask), nneg Y) to fp) to copysign((bitcast Y to fp), X).
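A minimal sketch of how this copysign idiom can be matched and rewritten with the PatternMatch helpers and IRBuilder calls listed further below. The function name, the exact matcher arrangement, and the non-negativity check placement are illustrative assumptions, not the in-tree implementation.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static Value *foldCopySignIdiomSketch(BitCastInst &CI,
                                      InstCombiner::BuilderTy &Builder,
                                      const SimplifyQuery &SQ) {
  Value *X, *Y;
  Type *FPTy = CI.getType();
  // Match: bitcast (or (and (bitcast X to int), signmask), Y) to fp.
  if (!FPTy->isFPOrFPVectorTy() ||
      !match(CI.getOperand(0),
             m_c_Or(m_c_And(m_ElementWiseBitCast(m_Value(X)), m_SignMask()),
                    m_Value(Y))))
    return nullptr;
  // Y must not contribute a sign bit, i.e. it has to be known non-negative.
  if (!isKnownNonNegative(Y, SQ))
    return nullptr;
  // Rewrite as copysign((bitcast Y to fp), X): magnitude from Y, sign from X.
  Value *MagAsFP = Builder.CreateBitCast(Y, FPTy);
  return Builder.CreateCopySign(MagAsFP, X);
}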
static Type * shrinkFPConstantVector(Value *V, bool PreferBFloat)
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear, InstCombinerImpl &IC, Instruction *CxtI)
Determine if the specified value can be computed in the specified wider type and produce the same low bits.
static Instruction * canonicalizeBitCastExtElt(BitCastInst &BitCast, InstCombinerImpl &IC)
Canonicalize scalar bitcasts of extracted elements into a bitcast of the vector followed by extract element.
static Instruction * shrinkSplatShuffle(TruncInst &Trunc, InstCombiner::BuilderTy &Builder)
Try to narrow the width of a splat shuffle.
static Type * shrinkFPConstant(ConstantFP *CFP, bool PreferBFloat)
static Instruction * foldFPtoI(Instruction &FI, InstCombiner &IC)
static Instruction * foldBitCastSelect(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
Change the type of a select if we can eliminate a bitcast.
static Instruction * foldBitCastBitwiseLogic(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
Change the type of a bitwise logic operation if we can eliminate a bitcast.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem)
Return a Constant* for the specified floating-point constant if it fits in the specified FP type without changing its value.
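The check described above can be sketched with APFloat's convert API: move the constant into the narrower semantics and see whether any information is lost. This mirrors the intent of fitsInFPType but is an illustrative version, not necessarily the exact in-tree code.

#include "llvm/ADT/APFloat.h"
#include "llvm/IR/Constants.h"
using namespace llvm;

static bool fitsInFPTypeSketch(ConstantFP *CFP, const fltSemantics &Sem) {
  bool LosesInfo = false;
  APFloat F = CFP->getValueAPF();
  // Convert into the narrower semantics; LosesInfo reports precision loss.
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &LosesInfo);
  return !LosesInfo;
}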
static Instruction * optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, InstCombinerImpl &IC)
This input value (which is known to have vector type) is being zero extended or truncated to the specified vector type.
static Instruction * shrinkInsertElt(CastInst &Trunc, InstCombiner::BuilderTy &Builder)
Try to narrow the width of an insert element.
static Type * getMinimumFPType(Value *V, bool PreferBFloat)
Find the minimum FP type we can safely truncate to.
static bool isMultipleOfTypeSize(unsigned Value, Type *Ty)
static Value * optimizeIntegerToVectorInsertions(BitCastInst &CI, InstCombinerImpl &IC)
If the input is an 'or' instruction, we may be doing shifts and ors to assemble the elements of the vector manually.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty)
Constants and extensions/truncates from the destination type are always free to be evaluated in that type.
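A hedged sketch of that "always free" test, written with the PatternMatch helpers documented later in this list; the exact in-tree predicate may differ in details.

#include "llvm/IR/Constants.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool canAlwaysEvaluateInTypeSketch(Value *V, Type *Ty) {
  // Constants can be folded to any integer type.
  if (isa<Constant>(V))
    return true;
  // An ext/trunc whose input already has type Ty is free: evaluating the
  // expression in Ty simply uses that input directly.
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;
  return false;
}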
static bool canNotEvaluateInType(Value *V, Type *Ty)
Filter out values that we can not evaluate in the destination type for free.
static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC)
Return true if the cast from integer to FP can be proven to be exact for all possible inputs (the conversion does not lose any precision).
static unsigned getTypeSizeIndex(unsigned Value, Type *Ty)
static Instruction * foldVecTruncToExtElt(TruncInst &Trunc, InstCombinerImpl &IC)
Given a vector that is bitcast to an integer, optionally logically right-shifted, and truncated, convert it to an extractelement.
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC, Instruction *CxtI)
Return true if we can evaluate the specified expression tree as type Ty instead of its larger type, and arrive with the same value.
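How the trunc visitor ties this query to EvaluateInDifferentType (documented further below): if the whole tree can be computed in the narrow type, rebuild it there and replace the trunc. This is a simplified sketch under that assumption; the real visitTrunc adds profitability checks such as shouldChangeType.

#include "InstCombineInternal.h"
using namespace llvm;

static Instruction *narrowTruncSketch(InstCombinerImpl &IC, TruncInst &Trunc) {
  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType();
  // canEvaluateTruncated is the static helper declared above.
  if (!canEvaluateTruncated(Src, DestTy, IC, &Trunc))
    return nullptr;
  // Rebuild the expression tree directly in the narrow type.
  Value *Res = IC.EvaluateInDifferentType(Src, DestTy, /*isSigned=*/false);
  return IC.replaceInstUsesWith(Trunc, Res);
}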
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
This file implements a set that has insertion order iteration characteristics.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
int32_t exactLogBase2() const
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
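Quick numeric illustrations of the three mask constructors above (the 8-bit width is chosen arbitrarily for the example):

#include "llvm/ADT/APInt.h"
using namespace llvm;

void apintMaskExamples() {
  APInt Low = APInt::getLowBitsSet(8, 3);   // 0b00000111
  APInt High = APInt::getHighBitsSet(8, 2); // 0b11000000
  APInt From = APInt::getBitsSetFrom(8, 5); // 0b11100000 (bits 5..7)
  (void)Low; (void)High; (void)From;
}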
This class represents a conversion between pointers from one address space to another.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
ArrayRef< T > take_back(size_t N=1) const
Return a copy of *this with only the last N elements.
std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
BinaryOps getOpcode() const
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
This class represents a no-op cast from one type to another.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Type * getSrcTy() const
Return the source type, as a convenience.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
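A hedged sketch of using this query to collapse a pair of casts: if the two opcodes compose into a single cast opcode, emit that cast from the original source. The helper name is illustrative; the DataLayout-derived IntPtr types are only needed for pointer/integer pairs, so passing nullptr is the conservative choice here.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static Value *collapseCastPairSketch(CastInst *First, CastInst *Second,
                                     IRBuilder<> &Builder) {
  unsigned NewOpc = CastInst::isEliminableCastPair(
      First->getOpcode(), Second->getOpcode(), First->getSrcTy(),
      /*MidTy=*/First->getDestTy(), Second->getDestTy(),
      /*SrcIntPtrTy=*/nullptr, /*MidIntPtrTy=*/nullptr,
      /*DstIntPtrTy=*/nullptr);
  if (!NewOpc) // 0 means the pair cannot be folded to a single cast.
    return nullptr;
  return Builder.CreateCast(Instruction::CastOps(NewOpc),
                            First->getOperand(0), Second->getDestTy());
}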
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's constructor.
Type * getDestTy() const
Return the destination type, as a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ICMP_SLT
signed less than
ICMP_ULT
unsigned less than
ICMP_ULE
unsigned less or equal
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
This is an important base class in LLVM.
static Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isElementWiseEqual(Value *Y) const
Return true if this constant and a constant 'Y' are element-wise equal.
This class represents an Operation in the Expression.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits. FIXME: The defaults need to be removed once all of the backends/clients are updated.
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
Class to represent fixed width SIMD vectors.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateVScale(Constant *Scaling, const Twine &Name="")
Create a call to llvm.vscale, multiplied by Scaling.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Value * CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Value * CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
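Example of CreateIntrinsic in the style the funnel-shift narrowing code uses it: building a type-mangled llvm.fshl call. The function name below is illustrative.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

Value *emitFshlSketch(IRBuilder<> &B, Value *Hi, Value *Lo, Value *ShAmt) {
  // llvm.fshl is mangled on its single operand type.
  Type *Ty = Hi->getType();
  return B.CreateIntrinsic(Intrinsic::fshl, {Ty}, {Hi, Lo, ShAmt});
}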
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateCopySign(Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create call to the copysign intrinsic.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitZExt(ZExtInst &Zext)
Instruction * visitAddrSpaceCast(AddrSpaceCastInst &CI)
Instruction * visitSExt(SExtInst &Sext)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Instruction * visitFPToSI(FPToSIInst &FI)
Instruction * visitTrunc(TruncInst &CI)
Instruction * visitUIToFP(CastInst &CI)
Instruction * visitPtrToInt(PtrToIntInst &CI)
Instruction * visitSIToFP(CastInst &CI)
Instruction * commonCastTransforms(CastInst &CI)
Implement the transforms common to all CastInst visitors.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldItoFPtoI(CastInst &FI)
fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X). This is safe if the intermediate type has enough bits in its mantissa to accurately represent all values of X.
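A worked instance of that rule, under the assumption that exactness is governed by the FP type's mantissa width: i16 -> double -> i32 is lossless because double's 53-bit mantissa can represent every i16, so the cast pair folds to a plain sext. The helper below only illustrates the width check.

#include "llvm/IR/Type.h"
using namespace llvm;

//   %f = sitofp i16 %x to double
//   %r = fptosi double %f to i32      ; equivalent to: sext i16 %x to i32
bool intFPRoundTripIsExactSketch(Type *IntTy, Type *FPTy) {
  int MantissaBits = FPTy->getFPMantissaWidth(); // -1 if not a supported FP type
  return MantissaBits > 0 &&
         IntTy->getScalarSizeInBits() <= (unsigned)MantissaBits;
}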
Instruction * visitFPTrunc(FPTruncInst &CI)
Instruction * visitBitCast(BitCastInst &CI)
Instruction * visitIntToPtr(IntToPtrInst &CI)
Instruction * visitFPToUI(FPToUIInst &FI)
Value * EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned)
Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually insert the code to evaluate the expression.
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * visitFPExt(CastInst &CI)
LoadInst * combineLoadToNewType(LoadInst &LI, Type *NewTy, const Twine &Suffix="")
Helper to combine a load to a new type.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
const SimplifyQuery & getSimplifyQuery() const
unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
void push(Instruction *I)
Push the instruction onto the worklist stack.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction, which must be an operator which supports these flags.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not belong to a module.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
const Function * getFunction() const
Return the function this instruction belongs to.
void setNonNeg(bool b=true)
Set or clear the nneg flag on this instruction, which must be a zext instruction.
bool hasNonNeg() const LLVM_READONLY
Determine whether the nneg flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
This class represents a cast from an integer to a pointer.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
void setHasNoUnsignedWrap(bool B)
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
static Type * getHalfTy(LLVMContext &C)
static Type * getDoubleTy(LLVMContext &C)
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
static Type * getBFloatTy(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewType(Type *EltTy) const
Given vector type, change the element type, whilst keeping the old number of elements.
int getFPMantissaWidth() const
Return the width of the mantissa of this type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static Type * getPPC_FP128Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
'undef' values are things that do not have specified contents.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
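Typical use of match() together with the m_* matchers listed around it, in the style this file uses throughout; the names X and C are illustrative bindings.

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

bool matchTruncOfLShrSketch(Value *V) {
  Value *X;
  const APInt *C;
  // Bind the operands of "trunc (lshr X, C)" if V has that shape.
  return match(V, m_Trunc(m_LShr(m_Value(X), m_APInt(C))));
}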
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
CastInst_match< OpTy, FPToUIInst > m_FPToUI(const OpTy &Op)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
OneUse_match< T > m_OneUse(const T &SubPattern)
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
VScaleVal_match m_VScale()
CastInst_match< OpTy, FPToSIInst > m_FPToSI(const OpTy &Op)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::IntToPtr > m_IntToPtr(const OpTy &Op)
Matches IntToPtr.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
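Small numeric checks of the MathExtras helpers above:

#include "llvm/Support/MathExtras.h"
using namespace llvm;

void mathExtrasExamples() {
  unsigned A = Log2_32(32);      // 5
  unsigned B = Log2_32_Ceil(33); // 6
  bool P = isPowerOf2_32(64);    // true
  (void)A; (void)B; (void)P;
}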
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out parameter results if we successfully match.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
constexpr unsigned BitWidth
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static const fltSemantics & IEEEsingle() LLVM_READNONE
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & IEEEdouble() LLVM_READNONE
static const fltSemantics & IEEEhalf() LLVM_READNONE
static const fltSemantics & BFloat() LLVM_READNONE
static unsigned int semanticsIntSizeInBits(const fltSemantics &, bool)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
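A hedged sketch of how these KnownBits queries are typically combined with InstCombiner::computeKnownBits (documented earlier in this list), e.g. when the zext/sext icmp transforms reason about which bits of an operand can be set; the helper name is illustrative.

#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;

void knownBitsSketch(InstCombiner &IC, Value *V, Instruction &CxtI) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  IC.computeKnownBits(V, Known, /*Depth=*/0, &CxtI);
  unsigned MinTZ = Known.countMinTrailingZeros(); // low bits known to be zero
  APInt Max = Known.getMaxValue();                // largest possible value
  (void)MinTZ; (void)Max;
}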
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
SimplifyQuery getWithInstruction(const Instruction *I) const