#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
69 "disable-fp-call-folding",
70 cl::desc(
"Disable constant-folding of FP intrinsics and libcalls."),
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {

    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    Result |= ElementCI->getValue().zext(Result.getBitWidth());
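// Illustrative example (editorial, not from the source): on a little-endian
// target, bitcasting <2 x i16> <i16 1, i16 2> to i32 visits element 1 first,
// so Result becomes (2 << 16) | 1 == 0x00020001, matching the vector's
// in-memory layout.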
static bool foldMixesPoisonBits(Constant *C, unsigned NumSrcElt,
                                unsigned NumDstElt) {

  if (NumSrcElt % NumDstElt != 0)
    return C->containsPoisonElement();
  unsigned Ratio = NumSrcElt / NumDstElt;
  for (unsigned i = 0; i != NumSrcElt; i += Ratio) {
    bool HasPoison = false;
    bool HasNonPoison = false;
    for (unsigned j = 0; j != Ratio; ++j) {
      Constant *Src = C->getAggregateElement(i + j);

    if (HasPoison && HasNonPoison)
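// Illustrative example (editorial): narrowing <4 x i8> <poison, 1, 2, 3> to
// <2 x i16> makes destination lane 0 depend on both a poison and a non-poison
// source element; that is the "mixed" case this helper reports.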
static bool computePoisonDstLanes(Constant *C, unsigned NumSrcElt,

  if ((NumDstElt < NumSrcElt ? NumSrcElt % NumDstElt : NumDstElt % NumSrcElt))
    return !C->containsPoisonElement();
  if (NumDstElt < NumSrcElt) {
    unsigned Ratio = NumSrcElt / NumDstElt;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(i * Ratio + j);

          PoisonDstElts[i] = true;

  } else {
    unsigned Ratio = NumDstElt / NumSrcElt;
    for (unsigned i = 0; i != NumSrcElt; ++i) {
      Constant *Src = C->getAggregateElement(i);

        PoisonDstElts.set(i * Ratio, (i + 1) * Ratio);
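// Illustrative example (editorial): widening <2 x i16> <1, poison> to
// <4 x i8> gives Ratio == 2, so source element 1 marks destination lanes
// [2, 4) as poison via PoisonDstElts.set(2, 4).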
184 "Invalid constantexpr bitcast!");
194 Type *SrcEltTy = VTy->getElementType();
198 if (SrcEltTy->
isByteTy() &&
C->containsPoisonElement())
212 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
213 SrcEltTy, NumSrcElts,
DL))
217 return ConstantInt::get(DestTy, Result);
  if (NumDstElt == NumSrcElt)

  Type *DstEltTy = DestVTy->getElementType();

  if (NumDstElt < NumSrcElt && foldMixesPoisonBits(C, NumSrcElt, NumDstElt))

         "Constant folding cannot fail for plain fp->int bitcast!");

  if (!computePoisonDstLanes(C, NumSrcElt, NumDstElt, PoisonDstElts))

         "Constant folding cannot fail for plain byte->int bitcast!");

  bool isLittleEndian = DL.isLittleEndian();

  APInt Buffer(2 * std::max(SrcBitSize, DstBitSize), 0);
  APInt UndefMask(Buffer.getBitWidth(), 0);
  APInt PoisonMask(Buffer.getBitWidth(), 0);
  unsigned BufferBitSize = 0;
  while (Result.size() != NumDstElt) {

    while (BufferBitSize < DstBitSize) {
      Constant *Element = C->getAggregateElement(SrcElt++);

      if (!isLittleEndian) {
        Buffer <<= SrcBitSize;
        UndefMask <<= SrcBitSize;
        PoisonMask <<= SrcBitSize;
      }

      unsigned BitPosition = isLittleEndian ? BufferBitSize : 0;

        UndefMask.setBits(BitPosition, BitPosition + SrcBitSize);

        PoisonMask.setBits(BitPosition, BitPosition + SrcBitSize);

      SrcValue = Src->getValue();

      Buffer.insertBits(SrcValue, BitPosition);
      BufferBitSize += SrcBitSize;
    }

    while (BufferBitSize >= DstBitSize) {
      unsigned ShiftAmt = isLittleEndian ? 0 : BufferBitSize - DstBitSize;

      if (UndefMask.extractBits(DstBitSize, ShiftAmt).isAllOnes()) {

      if (!PoisonMask.extractBits(DstBitSize, ShiftAmt).isZero()) {

      Result.push_back(ConstantInt::get(DstEltTy, Elt));

      if (isLittleEndian) {
        Buffer.lshrInPlace(DstBitSize);
        UndefMask.lshrInPlace(DstBitSize);
        PoisonMask.lshrInPlace(DstBitSize);
      }
      BufferBitSize -= DstBitSize;
    }
  }

  for (unsigned I : PoisonDstElts.set_bits())
    *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();

  if (!CE)
    return false;

  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::PtrToAddr)
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());

    if (!GEP->accumulateConstantOffset(DL, TmpOffset))
  Type *SrcTy = C->getType();

  TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
  TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);

  if (SrcSize == DestSize &&
      DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==

    Cast = Instruction::IntToPtr;
  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
    Cast = Instruction::PtrToInt;
  if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())

  if (SrcTy->isStructTy()) {

      ElemC = C->getAggregateElement(Elem++);
    } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());

    if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))

    C = C->getAggregateElement(0u);
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  if (ByteOffset >= DL.getTypeStoreSize(C->getType()))

  if (CI && CI->getType()->isIntegerTy()) {
    if ((CI->getBitWidth() & 7) != 0)

    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth() / 8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
  if (CFP && CFP->getType()->isFloatingPointTy()) {
    if (CFP->getType()->isDoubleTy()) {

      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);

    if (CFP->getType()->isFloatTy()) {

      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);

    if (CFP->getType()->isHalfTy()) {

      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);

      ByteOffset -= CurEltOffset;

      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,

      if (Index == CS->getType()->getNumElements())

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)

      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;

      CurEltOffset = NextEltOffset;
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);

      if (!DL.typeSizeEqualsStoreSize(EltTy))

      EltSize = DL.getTypeStoreSize(EltTy);

    uint64_t Index = ByteOffset / EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,

      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)

      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
  if (CE->getOpcode() == Instruction::IntToPtr &&
      CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
    return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,

                              DL.getTypeSizeInBits(LoadTy).getFixedValue());

          FoldReinterpretLoadFromConst(C, MapTy, OrigLoadTy, Offset, DL)) {
  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;

  if (BytesLoaded > 128 || BytesLoaded == 0)

  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))

  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());

  unsigned char *CurPtr = RawBytes.data();
  unsigned BytesLeft = BytesLoaded;

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
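// Illustrative example (editorial): with RawBytes == {0x01, 0x02, 0x03, 0x04}
// on a little-endian target, the loop above assembles ResultVal == 0x04030201,
// i.e. the first byte in memory becomes the least significant byte of the
// loaded integer.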
  if (NBytes > UINT16_MAX)

  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
  if (!Offset.isZero() || !Indices[0].isZero())

    if (Index.isNegative() || Index.getActiveBits() >= 32)

    C = C->getAggregateElement(Index.getZExtValue());

  if (Offset.getSignificantBits() <= 64)

            FoldReinterpretLoadFromConst(C, Ty, Ty, Offset.getSExtValue(), DL))
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())

  if (!DL.typeSizeEqualsStoreSize(C->getType()))

  if (C->isNullValue() && !Ty->isX86_AMXTy())

  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))

  if (Opc == Instruction::And) {

    if ((Known1.One | Known0.Zero).isAllOnes()) {

    if ((Known0.One | Known1.Zero).isAllOnes()) {

  if (Opc == Instruction::Sub) {

    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
                                  std::optional<ConstantRange> InRange,

  Type *IntIdxTy = DL.getIndexType(ResultTy);

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {

                                        SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {

          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
  Type *SrcElemTy = GEP->getSourceElementType();

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);

      DL.getIndexedOffsetInType(

  std::optional<ConstantRange> InRange = GEP->getInRange();

  bool Overflow = false;

    NW &= GEP->getNoWrapFlags();

    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)

        AllConstantInt = false;
    if (!AllConstantInt)

    if (auto GEPRange = GEP->getInRange()) {
      auto AdjustedGEPRange = GEPRange->sextOrTrunc(BitWidth).subtract(Offset);

          InRange ? InRange->intersectWith(AdjustedGEPRange) : AdjustedGEPRange;

    SrcElemTy = GEP->getSourceElementType();

  APInt BaseIntVal(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);

  if (CE->getOpcode() == Instruction::IntToPtr) {

      BaseIntVal = Base->getValue().zextOrTrunc(BaseIntVal.getBitWidth());

      !DL.mustNotIntroduceIntToPtr(Ptr->getType())) {

    bool CanBeNull, CanBeFreed;

    if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,

                                       bool AllowNonDeterministic) {

  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:

                                   AllowNonDeterministic);

    Type *SrcElemTy = GEP->getSourceElementType();

                                          GEP->getNoWrapFlags(),

    return CE->getWithOperands(Ops);

  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {

  case Instruction::Freeze:

  case Instruction::Call:

                                  AllowNonDeterministic);

  case Instruction::Select:

  case Instruction::ExtractElement:

  case Instruction::ExtractValue:

  case Instruction::InsertElement:

  case Instruction::InsertValue:

  case Instruction::ShuffleVector:

  case Instruction::Load: {
    if (LI->isVolatile())
  for (const Use &OldU : C->operands()) {

    auto It = FoldedOps.find(OldC);
    if (It == FoldedOps.end()) {
      NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
      FoldedOps.insert({OldC, NewC});

    Ops.push_back(NewC);

    if (Constant *Res = ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops,
                                                     DL, TLI, true))

  for (Value *Incoming : PN->incoming_values()) {

    C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);

    if (CommonValue && C != CommonValue)
  if (!all_of(I->operands(), [](const Use &U) { return isa<Constant>(U); }))

  for (const Use &OpU : I->operands()) {

    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);

  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);

                                    bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
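// Typical use (illustrative sketch, not from this file): a caller that has
// already collected constant operands for an instruction `I` can try
//   Constant *Folded = ConstantFoldInstOperands(I, Ops, DL, &TLI);
// and replace all uses of `I` with `Folded` when the result is non-null.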
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

    if (CE0->getOpcode() == Instruction::PtrToInt ||
        CE0->getOpcode() == Instruction::PtrToAddr) {
      Type *AddrTy = DL.getAddressType(CE0->getOperand(0)->getType());
      if (CE0->getType() == AddrTy) {

    if (CE0->getOpcode() == CE1->getOpcode()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

      if (CE0->getOpcode() == Instruction::PtrToInt ||
          CE0->getOpcode() == Instruction::PtrToAddr) {
        Type *AddrTy = DL.getAddressType(CE0->getOperand(0)->getType());
        if (CE0->getType() == AddrTy &&
            CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {

              Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);

    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);

                                             DL, Offset0, IsEqPred,

    APInt Offset1(IndexWidth, 0);

                                             DL, Offset1, IsEqPred,

    if (Stripped0 == Stripped1)
  if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))

    return ConstantFP::get(Ty, APF);

  return ConstantFP::get(

                      Ty->getScalarType()->getFltSemantics());

                                      IsOutput ? Mode.Output : Mode.Input);
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {

    for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
      const APFloat &Elt = CDV->getElementAsAPFloat(I);

      NewElts.push_back(ConstantFP::get(Ty, Elt));
                                           bool AllowNonDeterministic) {

    if (!AllowNonDeterministic)

      if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
          FP->hasAllowContract() || FP->hasAllowReciprocal())

  if (!AllowNonDeterministic && C->isNaN())

                                   C->getType(), DestTy, &DL))
  case Instruction::PtrToAddr:
  case Instruction::PtrToInt:

    if (CE->getOpcode() == Instruction::IntToPtr) {

      Type *MidTy = Opcode == Instruction::PtrToInt
                        ? DL.getAddressType(CE->getType())
                        : DL.getIntPtrType(CE->getType());

      unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());

                                                     DL, BaseOffset, true));
      if (Base->isNullValue()) {
        FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);

        if (GEP->getNumIndices() == 1 &&
            GEP->getSourceElementType()->isIntegerTy(8)) {

          if (Sub && Sub->getType() == IntIdxTy &&
              Sub->getOpcode() == Instruction::Sub &&
              Sub->getOperand(0)->isNullValue())

                                       Sub->getOperand(1));

  case Instruction::IntToPtr:

    if (CE->getOpcode() == Instruction::PtrToInt) {
      Constant *SrcPtr = CE->getOperand(0);
      unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
      unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

      if (MidIntSize >= SrcPtrSize) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:

  case Instruction::BitCast:

  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)

  if (Call->isNoBuiltin())

  if (Call->getFunctionType() != F->getFunctionType())

    return Arg.getType()->isFloatingPointTy();

  switch (F->getIntrinsicID()) {
1740 case Intrinsic::bswap:
1741 case Intrinsic::ctpop:
1742 case Intrinsic::ctlz:
1743 case Intrinsic::cttz:
1744 case Intrinsic::fshl:
1745 case Intrinsic::fshr:
1746 case Intrinsic::launder_invariant_group:
1747 case Intrinsic::strip_invariant_group:
1748 case Intrinsic::masked_load:
1749 case Intrinsic::get_active_lane_mask:
1750 case Intrinsic::abs:
1751 case Intrinsic::smax:
1752 case Intrinsic::smin:
1753 case Intrinsic::umax:
1754 case Intrinsic::umin:
1755 case Intrinsic::scmp:
1756 case Intrinsic::ucmp:
1757 case Intrinsic::sadd_with_overflow:
1758 case Intrinsic::uadd_with_overflow:
1759 case Intrinsic::ssub_with_overflow:
1760 case Intrinsic::usub_with_overflow:
1761 case Intrinsic::smul_with_overflow:
1762 case Intrinsic::umul_with_overflow:
1763 case Intrinsic::sadd_sat:
1764 case Intrinsic::uadd_sat:
1765 case Intrinsic::ssub_sat:
1766 case Intrinsic::usub_sat:
1767 case Intrinsic::smul_fix:
1768 case Intrinsic::smul_fix_sat:
1769 case Intrinsic::bitreverse:
1770 case Intrinsic::is_constant:
1771 case Intrinsic::vector_reduce_add:
1772 case Intrinsic::vector_reduce_mul:
1773 case Intrinsic::vector_reduce_and:
1774 case Intrinsic::vector_reduce_or:
1775 case Intrinsic::vector_reduce_xor:
1776 case Intrinsic::vector_reduce_smin:
1777 case Intrinsic::vector_reduce_smax:
1778 case Intrinsic::vector_reduce_umin:
1779 case Intrinsic::vector_reduce_umax:
1780 case Intrinsic::vector_extract:
1781 case Intrinsic::vector_insert:
1782 case Intrinsic::vector_interleave2:
1783 case Intrinsic::vector_interleave3:
1784 case Intrinsic::vector_interleave4:
1785 case Intrinsic::vector_interleave5:
1786 case Intrinsic::vector_interleave6:
1787 case Intrinsic::vector_interleave7:
1788 case Intrinsic::vector_interleave8:
1789 case Intrinsic::vector_deinterleave2:
1790 case Intrinsic::vector_deinterleave3:
1791 case Intrinsic::vector_deinterleave4:
1792 case Intrinsic::vector_deinterleave5:
1793 case Intrinsic::vector_deinterleave6:
1794 case Intrinsic::vector_deinterleave7:
1795 case Intrinsic::vector_deinterleave8:
1797 case Intrinsic::amdgcn_perm:
1798 case Intrinsic::amdgcn_wave_reduce_umin:
1799 case Intrinsic::amdgcn_wave_reduce_umax:
1800 case Intrinsic::amdgcn_wave_reduce_max:
1801 case Intrinsic::amdgcn_wave_reduce_min:
1802 case Intrinsic::amdgcn_wave_reduce_and:
1803 case Intrinsic::amdgcn_wave_reduce_or:
1804 case Intrinsic::amdgcn_s_wqm:
1805 case Intrinsic::amdgcn_s_quadmask:
1806 case Intrinsic::amdgcn_s_bitreplicate:
1807 case Intrinsic::arm_mve_vctp8:
1808 case Intrinsic::arm_mve_vctp16:
1809 case Intrinsic::arm_mve_vctp32:
1810 case Intrinsic::arm_mve_vctp64:
1811 case Intrinsic::aarch64_sve_convert_from_svbool:
1812 case Intrinsic::wasm_alltrue:
1813 case Intrinsic::wasm_anytrue:
1814 case Intrinsic::wasm_dot:
1816 case Intrinsic::wasm_trunc_signed:
1817 case Intrinsic::wasm_trunc_unsigned:
1822 case Intrinsic::minnum:
1823 case Intrinsic::maxnum:
1824 case Intrinsic::minimum:
1825 case Intrinsic::maximum:
1826 case Intrinsic::minimumnum:
1827 case Intrinsic::maximumnum:
1828 case Intrinsic::log:
1829 case Intrinsic::log2:
1830 case Intrinsic::log10:
1831 case Intrinsic::exp:
1832 case Intrinsic::exp2:
1833 case Intrinsic::exp10:
1834 case Intrinsic::sqrt:
1835 case Intrinsic::sin:
1836 case Intrinsic::cos:
1837 case Intrinsic::sincos:
1838 case Intrinsic::sinh:
1839 case Intrinsic::cosh:
1840 case Intrinsic::atan:
1841 case Intrinsic::pow:
1842 case Intrinsic::powi:
1843 case Intrinsic::ldexp:
1844 case Intrinsic::fma:
1845 case Intrinsic::fmuladd:
1846 case Intrinsic::frexp:
1847 case Intrinsic::fptoui_sat:
1848 case Intrinsic::fptosi_sat:
1849 case Intrinsic::amdgcn_cos:
1850 case Intrinsic::amdgcn_cubeid:
1851 case Intrinsic::amdgcn_cubema:
1852 case Intrinsic::amdgcn_cubesc:
1853 case Intrinsic::amdgcn_cubetc:
1854 case Intrinsic::amdgcn_fmul_legacy:
1855 case Intrinsic::amdgcn_fma_legacy:
1856 case Intrinsic::amdgcn_fract:
1857 case Intrinsic::amdgcn_sin:
1859 case Intrinsic::x86_sse_cvtss2si:
1860 case Intrinsic::x86_sse_cvtss2si64:
1861 case Intrinsic::x86_sse_cvttss2si:
1862 case Intrinsic::x86_sse_cvttss2si64:
1863 case Intrinsic::x86_sse2_cvtsd2si:
1864 case Intrinsic::x86_sse2_cvtsd2si64:
1865 case Intrinsic::x86_sse2_cvttsd2si:
1866 case Intrinsic::x86_sse2_cvttsd2si64:
1867 case Intrinsic::x86_avx512_vcvtss2si32:
1868 case Intrinsic::x86_avx512_vcvtss2si64:
1869 case Intrinsic::x86_avx512_cvttss2si:
1870 case Intrinsic::x86_avx512_cvttss2si64:
1871 case Intrinsic::x86_avx512_vcvtsd2si32:
1872 case Intrinsic::x86_avx512_vcvtsd2si64:
1873 case Intrinsic::x86_avx512_cvttsd2si:
1874 case Intrinsic::x86_avx512_cvttsd2si64:
1875 case Intrinsic::x86_avx512_vcvtss2usi32:
1876 case Intrinsic::x86_avx512_vcvtss2usi64:
1877 case Intrinsic::x86_avx512_cvttss2usi:
1878 case Intrinsic::x86_avx512_cvttss2usi64:
1879 case Intrinsic::x86_avx512_vcvtsd2usi32:
1880 case Intrinsic::x86_avx512_vcvtsd2usi64:
1881 case Intrinsic::x86_avx512_cvttsd2usi:
1882 case Intrinsic::x86_avx512_cvttsd2usi64:
1885 case Intrinsic::nvvm_fmax_d:
1886 case Intrinsic::nvvm_fmax_f:
1887 case Intrinsic::nvvm_fmax_ftz_f:
1888 case Intrinsic::nvvm_fmax_ftz_nan_f:
1889 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
1890 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
1891 case Intrinsic::nvvm_fmax_nan_f:
1892 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
1893 case Intrinsic::nvvm_fmax_xorsign_abs_f:
1896 case Intrinsic::nvvm_fmin_d:
1897 case Intrinsic::nvvm_fmin_f:
1898 case Intrinsic::nvvm_fmin_ftz_f:
1899 case Intrinsic::nvvm_fmin_ftz_nan_f:
1900 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
1901 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
1902 case Intrinsic::nvvm_fmin_nan_f:
1903 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
1904 case Intrinsic::nvvm_fmin_xorsign_abs_f:
1907 case Intrinsic::nvvm_f2i_rm:
1908 case Intrinsic::nvvm_f2i_rn:
1909 case Intrinsic::nvvm_f2i_rp:
1910 case Intrinsic::nvvm_f2i_rz:
1911 case Intrinsic::nvvm_f2i_rm_ftz:
1912 case Intrinsic::nvvm_f2i_rn_ftz:
1913 case Intrinsic::nvvm_f2i_rp_ftz:
1914 case Intrinsic::nvvm_f2i_rz_ftz:
1915 case Intrinsic::nvvm_f2ui_rm:
1916 case Intrinsic::nvvm_f2ui_rn:
1917 case Intrinsic::nvvm_f2ui_rp:
1918 case Intrinsic::nvvm_f2ui_rz:
1919 case Intrinsic::nvvm_f2ui_rm_ftz:
1920 case Intrinsic::nvvm_f2ui_rn_ftz:
1921 case Intrinsic::nvvm_f2ui_rp_ftz:
1922 case Intrinsic::nvvm_f2ui_rz_ftz:
1923 case Intrinsic::nvvm_d2i_rm:
1924 case Intrinsic::nvvm_d2i_rn:
1925 case Intrinsic::nvvm_d2i_rp:
1926 case Intrinsic::nvvm_d2i_rz:
1927 case Intrinsic::nvvm_d2ui_rm:
1928 case Intrinsic::nvvm_d2ui_rn:
1929 case Intrinsic::nvvm_d2ui_rp:
1930 case Intrinsic::nvvm_d2ui_rz:
1933 case Intrinsic::nvvm_f2ll_rm:
1934 case Intrinsic::nvvm_f2ll_rn:
1935 case Intrinsic::nvvm_f2ll_rp:
1936 case Intrinsic::nvvm_f2ll_rz:
1937 case Intrinsic::nvvm_f2ll_rm_ftz:
1938 case Intrinsic::nvvm_f2ll_rn_ftz:
1939 case Intrinsic::nvvm_f2ll_rp_ftz:
1940 case Intrinsic::nvvm_f2ll_rz_ftz:
1941 case Intrinsic::nvvm_f2ull_rm:
1942 case Intrinsic::nvvm_f2ull_rn:
1943 case Intrinsic::nvvm_f2ull_rp:
1944 case Intrinsic::nvvm_f2ull_rz:
1945 case Intrinsic::nvvm_f2ull_rm_ftz:
1946 case Intrinsic::nvvm_f2ull_rn_ftz:
1947 case Intrinsic::nvvm_f2ull_rp_ftz:
1948 case Intrinsic::nvvm_f2ull_rz_ftz:
1949 case Intrinsic::nvvm_d2ll_rm:
1950 case Intrinsic::nvvm_d2ll_rn:
1951 case Intrinsic::nvvm_d2ll_rp:
1952 case Intrinsic::nvvm_d2ll_rz:
1953 case Intrinsic::nvvm_d2ull_rm:
1954 case Intrinsic::nvvm_d2ull_rn:
1955 case Intrinsic::nvvm_d2ull_rp:
1956 case Intrinsic::nvvm_d2ull_rz:
1959 case Intrinsic::nvvm_ceil_d:
1960 case Intrinsic::nvvm_ceil_f:
1961 case Intrinsic::nvvm_ceil_ftz_f:
1963 case Intrinsic::nvvm_fabs:
1964 case Intrinsic::nvvm_fabs_ftz:
1966 case Intrinsic::nvvm_floor_d:
1967 case Intrinsic::nvvm_floor_f:
1968 case Intrinsic::nvvm_floor_ftz_f:
1970 case Intrinsic::nvvm_rcp_rm_d:
1971 case Intrinsic::nvvm_rcp_rm_f:
1972 case Intrinsic::nvvm_rcp_rm_ftz_f:
1973 case Intrinsic::nvvm_rcp_rn_d:
1974 case Intrinsic::nvvm_rcp_rn_f:
1975 case Intrinsic::nvvm_rcp_rn_ftz_f:
1976 case Intrinsic::nvvm_rcp_rp_d:
1977 case Intrinsic::nvvm_rcp_rp_f:
1978 case Intrinsic::nvvm_rcp_rp_ftz_f:
1979 case Intrinsic::nvvm_rcp_rz_d:
1980 case Intrinsic::nvvm_rcp_rz_f:
1981 case Intrinsic::nvvm_rcp_rz_ftz_f:
1983 case Intrinsic::nvvm_round_d:
1984 case Intrinsic::nvvm_round_f:
1985 case Intrinsic::nvvm_round_ftz_f:
1987 case Intrinsic::nvvm_saturate_d:
1988 case Intrinsic::nvvm_saturate_f:
1989 case Intrinsic::nvvm_saturate_ftz_f:
1991 case Intrinsic::nvvm_sqrt_f:
1992 case Intrinsic::nvvm_sqrt_rn_d:
1993 case Intrinsic::nvvm_sqrt_rn_f:
1994 case Intrinsic::nvvm_sqrt_rn_ftz_f:
    return !Call->isStrictFP();
1998 case Intrinsic::nvvm_add_rm_d:
1999 case Intrinsic::nvvm_add_rn_d:
2000 case Intrinsic::nvvm_add_rp_d:
2001 case Intrinsic::nvvm_add_rz_d:
2002 case Intrinsic::nvvm_add_rm_f:
2003 case Intrinsic::nvvm_add_rn_f:
2004 case Intrinsic::nvvm_add_rp_f:
2005 case Intrinsic::nvvm_add_rz_f:
2006 case Intrinsic::nvvm_add_rm_ftz_f:
2007 case Intrinsic::nvvm_add_rn_ftz_f:
2008 case Intrinsic::nvvm_add_rp_ftz_f:
2009 case Intrinsic::nvvm_add_rz_ftz_f:
2012 case Intrinsic::nvvm_div_rm_d:
2013 case Intrinsic::nvvm_div_rn_d:
2014 case Intrinsic::nvvm_div_rp_d:
2015 case Intrinsic::nvvm_div_rz_d:
2016 case Intrinsic::nvvm_div_rm_f:
2017 case Intrinsic::nvvm_div_rn_f:
2018 case Intrinsic::nvvm_div_rp_f:
2019 case Intrinsic::nvvm_div_rz_f:
2020 case Intrinsic::nvvm_div_rm_ftz_f:
2021 case Intrinsic::nvvm_div_rn_ftz_f:
2022 case Intrinsic::nvvm_div_rp_ftz_f:
2023 case Intrinsic::nvvm_div_rz_ftz_f:
2026 case Intrinsic::nvvm_mul_rm_d:
2027 case Intrinsic::nvvm_mul_rn_d:
2028 case Intrinsic::nvvm_mul_rp_d:
2029 case Intrinsic::nvvm_mul_rz_d:
2030 case Intrinsic::nvvm_mul_rm_f:
2031 case Intrinsic::nvvm_mul_rn_f:
2032 case Intrinsic::nvvm_mul_rp_f:
2033 case Intrinsic::nvvm_mul_rz_f:
2034 case Intrinsic::nvvm_mul_rm_ftz_f:
2035 case Intrinsic::nvvm_mul_rn_ftz_f:
2036 case Intrinsic::nvvm_mul_rp_ftz_f:
2037 case Intrinsic::nvvm_mul_rz_ftz_f:
2040 case Intrinsic::nvvm_fma_rm_d:
2041 case Intrinsic::nvvm_fma_rn_d:
2042 case Intrinsic::nvvm_fma_rp_d:
2043 case Intrinsic::nvvm_fma_rz_d:
2044 case Intrinsic::nvvm_fma_rm_f:
2045 case Intrinsic::nvvm_fma_rn_f:
2046 case Intrinsic::nvvm_fma_rp_f:
2047 case Intrinsic::nvvm_fma_rz_f:
2048 case Intrinsic::nvvm_fma_rm_ftz_f:
2049 case Intrinsic::nvvm_fma_rn_ftz_f:
2050 case Intrinsic::nvvm_fma_rp_ftz_f:
2051 case Intrinsic::nvvm_fma_rz_ftz_f:
2055 case Intrinsic::fabs:
2056 case Intrinsic::copysign:
2057 case Intrinsic::is_fpclass:
2060 case Intrinsic::ceil:
2061 case Intrinsic::floor:
2062 case Intrinsic::round:
2063 case Intrinsic::roundeven:
2064 case Intrinsic::trunc:
2065 case Intrinsic::nearbyint:
2066 case Intrinsic::rint:
2067 case Intrinsic::canonicalize:
2071 case Intrinsic::experimental_constrained_fma:
2072 case Intrinsic::experimental_constrained_fmuladd:
2073 case Intrinsic::experimental_constrained_fadd:
2074 case Intrinsic::experimental_constrained_fsub:
2075 case Intrinsic::experimental_constrained_fmul:
2076 case Intrinsic::experimental_constrained_fdiv:
2077 case Intrinsic::experimental_constrained_frem:
2078 case Intrinsic::experimental_constrained_ceil:
2079 case Intrinsic::experimental_constrained_floor:
2080 case Intrinsic::experimental_constrained_round:
2081 case Intrinsic::experimental_constrained_roundeven:
2082 case Intrinsic::experimental_constrained_trunc:
2083 case Intrinsic::experimental_constrained_nearbyint:
2084 case Intrinsic::experimental_constrained_rint:
2085 case Intrinsic::experimental_constrained_fcmp:
2086 case Intrinsic::experimental_constrained_fcmps:
2088 case Intrinsic::experimental_cttz_elts:
  if (!F->hasName() || Call->isStrictFP())

    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";

    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";

    return Name == "exp" || Name == "expf" || Name == "exp2" ||
           Name == "exp2f" || Name == "erf" || Name == "erff";

    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";

    return Name == "ilogb" || Name == "ilogbf";

    return Name == "log" || Name == "logf" || Name == "logl" ||
           Name == "log2" || Name == "log2f" || Name == "log10" ||
           Name == "log10f" || Name == "logb" || Name == "logbf" ||
           Name == "log1p" || Name == "log1pf";

    return Name == "nearbyint" || Name == "nearbyintf" || Name == "nextafter" ||
           Name == "nextafterf" || Name == "nexttoward" ||
           Name == "nexttowardf";

    return Name == "pow" || Name == "powf";

    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf" ||
           Name == "roundeven" || Name == "roundevenf";

    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";

    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";

  if (Name.size() < 12 || Name[1] != '_')

    return Name == "__acos_finite" || Name == "__acosf_finite" ||
           Name == "__asin_finite" || Name == "__asinf_finite" ||
           Name == "__atan2_finite" || Name == "__atan2f_finite";

    return Name == "__cosh_finite" || Name == "__coshf_finite";

    return Name == "__exp_finite" || Name == "__expf_finite" ||
           Name == "__exp2_finite" || Name == "__exp2f_finite";

    return Name == "__log_finite" || Name == "__logf_finite" ||
           Name == "__log10_finite" || Name == "__log10f_finite";

    return Name == "__pow_finite" || Name == "__powf_finite";

    return Name == "__sinh_finite" || Name == "__sinhf_finite";
  if (Ty->isHalfTy() || Ty->isFloatTy()) {

    return ConstantFP::get(Ty->getContext(), APF);

  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
  if (Ty->isFP128Ty())
    return ConstantFP::get(Ty, V);
inline void llvm_fenv_clearexcept() {
#if HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);

inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)

#if HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
  switch (DenormKind) {

    return FTZPreserveSign(V);

    return FlushToPositiveZero(V);

  if (!DenormMode.isValid() ||

  llvm_fenv_clearexcept();
  auto Input = FlushWithDenormKind(V, DenormMode.Input);
  double Result = NativeFP(Input.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();

  Constant *Output = GetConstantFoldFPValue(Result, Ty);

  const auto *CFP = static_cast<ConstantFP *>(Output);
  const auto Res = FlushWithDenormKind(CFP->getValueAPF(), DenormMode.Output);
  return ConstantFP::get(Ty->getContext(), Res);
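// Editorial note: the pattern above only accepts the host libm result when
// the call raised neither errno (ERANGE/EDOM) nor a floating-point exception
// other than FE_INEXACT; otherwise the fold is abandoned and the call is left
// to execute at run time.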
#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,

  llvm_fenv_clearexcept();
  float128 Result = NativeFP(V.convertToQuad());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();

  return GetConstantFoldFPValue128(Result, Ty);

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),

  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();

  return GetConstantFoldFPValue(Result, Ty);
  if (Op->containsPoisonElement())

  if (Constant *SplatVal = Op->getSplatValue()) {

    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:

    case Intrinsic::vector_reduce_add:
      if (SplatVal->isNullValue())

    case Intrinsic::vector_reduce_mul:
      if (SplatVal->isNullValue() || SplatVal->isOneValue())

    case Intrinsic::vector_reduce_xor:
      if (SplatVal->isNullValue())

      if (OpVT->getElementCount().isKnownMultipleOf(2))
  APInt Acc = EltC->getValue();

    const APInt &X = EltC->getValue();

    case Intrinsic::vector_reduce_add:

    case Intrinsic::vector_reduce_mul:

    case Intrinsic::vector_reduce_and:

    case Intrinsic::vector_reduce_or:

    case Intrinsic::vector_reduce_xor:

    case Intrinsic::vector_reduce_smin:

    case Intrinsic::vector_reduce_smax:

    case Intrinsic::vector_reduce_umin:

    case Intrinsic::vector_reduce_umax:

  return ConstantInt::get(Op->getContext(), Acc);
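// Illustrative example (editorial): llvm.vector.reduce.add on
// <4 x i32> <i32 1, i32 2, i32 3, i32 4> folds to i32 10 by accumulating each
// element into Acc with the operation selected in the switch above.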
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {

  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  bool isExact = false;

                                           IsSigned, mode, &isExact);

  return ConstantInt::get(Ty, UIntVal, IsSigned);
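// Editorial note: `roundTowardZero` selects the truncating cvtt* behaviour,
// while the plain cvt* forms are folded with round-to-nearest-even; the assert
// above limits folding to results of at most 64 bits.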
  Type *Ty = Op->getType();

  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

    C = &CI->getValue();
    return ConstantFP::get(

  if (!Ty->isIEEELikeFPTy())

  if (Src.isNormal() || Src.isInfinity())
    return ConstantFP::get(CI->getContext(), Src);

  return ConstantFP::get(CI->getContext(), Src);
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {

    if (Operands[0]->isManifestConstant())

    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)

    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)

  if (IntrinsicID == Intrinsic::launder_invariant_group ||
      IntrinsicID == Intrinsic::strip_invariant_group) {

        Call->getParent() ? Call->getCaller() : nullptr;

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;

      unsigned Width = Ty->getIntegerBitWidth();

      bool IsExact = false;

        return ConstantInt::get(Ty, Int);

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {

                                        IntrinsicID == Intrinsic::fptoui_sat);

      return ConstantInt::get(Ty, Int);

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);
#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
    if (Ty->isFP128Ty()) {
      if (IntrinsicID == Intrinsic::log) {
        float128 Result = logf128(Op->getValueAPF().convertToQuad());
        return GetConstantFoldFPValue128(Result, Ty);

      LibFunc Fp128Func = NotLibFunc;
      if (TLI && TLI->getLibFunc(Name, Fp128Func) && TLI->has(Fp128Func) &&
          Fp128Func == LibFunc_logl)
        return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() &&

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint ||
        IntrinsicID == Intrinsic::roundeven) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::round) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::roundeven) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::ceil) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::floor) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::trunc) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::fabs) {

      return ConstantFP::get(Ty, U);

    if (IntrinsicID == Intrinsic::amdgcn_fract) {

      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(true);
      return ConstantFP::get(Ty, minimum(FractU, AlmostOne));

    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {

    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {

      RM = CI->getRoundingMode();

    case Intrinsic::experimental_constrained_round:

    case Intrinsic::experimental_constrained_ceil:

    case Intrinsic::experimental_constrained_floor:

    case Intrinsic::experimental_constrained_trunc:

    if (IntrinsicID == Intrinsic::experimental_constrained_rint &&

      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();

    } else if (U.isSignaling()) {
      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();

    return ConstantFP::get(Ty, U);
2730 switch (IntrinsicID) {
2732 case Intrinsic::nvvm_f2i_rm:
2733 case Intrinsic::nvvm_f2i_rn:
2734 case Intrinsic::nvvm_f2i_rp:
2735 case Intrinsic::nvvm_f2i_rz:
2736 case Intrinsic::nvvm_f2i_rm_ftz:
2737 case Intrinsic::nvvm_f2i_rn_ftz:
2738 case Intrinsic::nvvm_f2i_rp_ftz:
2739 case Intrinsic::nvvm_f2i_rz_ftz:
2741 case Intrinsic::nvvm_f2ui_rm:
2742 case Intrinsic::nvvm_f2ui_rn:
2743 case Intrinsic::nvvm_f2ui_rp:
2744 case Intrinsic::nvvm_f2ui_rz:
2745 case Intrinsic::nvvm_f2ui_rm_ftz:
2746 case Intrinsic::nvvm_f2ui_rn_ftz:
2747 case Intrinsic::nvvm_f2ui_rp_ftz:
2748 case Intrinsic::nvvm_f2ui_rz_ftz:
2750 case Intrinsic::nvvm_d2i_rm:
2751 case Intrinsic::nvvm_d2i_rn:
2752 case Intrinsic::nvvm_d2i_rp:
2753 case Intrinsic::nvvm_d2i_rz:
2755 case Intrinsic::nvvm_d2ui_rm:
2756 case Intrinsic::nvvm_d2ui_rn:
2757 case Intrinsic::nvvm_d2ui_rp:
2758 case Intrinsic::nvvm_d2ui_rz:
2760 case Intrinsic::nvvm_f2ll_rm:
2761 case Intrinsic::nvvm_f2ll_rn:
2762 case Intrinsic::nvvm_f2ll_rp:
2763 case Intrinsic::nvvm_f2ll_rz:
2764 case Intrinsic::nvvm_f2ll_rm_ftz:
2765 case Intrinsic::nvvm_f2ll_rn_ftz:
2766 case Intrinsic::nvvm_f2ll_rp_ftz:
2767 case Intrinsic::nvvm_f2ll_rz_ftz:
2769 case Intrinsic::nvvm_f2ull_rm:
2770 case Intrinsic::nvvm_f2ull_rn:
2771 case Intrinsic::nvvm_f2ull_rp:
2772 case Intrinsic::nvvm_f2ull_rz:
2773 case Intrinsic::nvvm_f2ull_rm_ftz:
2774 case Intrinsic::nvvm_f2ull_rn_ftz:
2775 case Intrinsic::nvvm_f2ull_rp_ftz:
2776 case Intrinsic::nvvm_f2ull_rz_ftz:
2778 case Intrinsic::nvvm_d2ll_rm:
2779 case Intrinsic::nvvm_d2ll_rn:
2780 case Intrinsic::nvvm_d2ll_rp:
2781 case Intrinsic::nvvm_d2ll_rz:
2783 case Intrinsic::nvvm_d2ull_rm:
2784 case Intrinsic::nvvm_d2ull_rn:
2785 case Intrinsic::nvvm_d2ull_rp:
2786 case Intrinsic::nvvm_d2ull_rz: {
        return ConstantInt::get(Ty, 0);

      unsigned BitWidth = Ty->getIntegerBitWidth();

      APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned);
      auto FloatToRound = IsFTZ ? FTZPreserveSign(U) : U;

      bool IsExact = false;
      FloatToRound.convertToInteger(ResInt, RMode, &IsExact);
      return ConstantInt::get(Ty, ResInt);
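// Editorial note: the nvvm f2i/d2i family encodes its behaviour in the
// intrinsic name: _rm/_rn/_rp/_rz select round toward -inf / nearest-even /
// +inf / zero, and the _ftz variants flush subnormal inputs to zero before
// converting (handled here via FTZPreserveSign).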
    switch (IntrinsicID) {

    case Intrinsic::log:

      if (U.isExactlyValue(1.0))

      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:

      if (U.isExactlyValue(1.0))

      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:

      if (U.isExactlyValue(1.0))

      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:

      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::exp10:

      return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sinh:
      return ConstantFoldFP(sinh, APF, Ty);
    case Intrinsic::cosh:
      return ConstantFoldFP(cosh, APF, Ty);
    case Intrinsic::atan:

        return ConstantFP::get(Ty, U);
      return ConstantFoldFP(atan, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
2881 case Intrinsic::nvvm_ceil_ftz_f:
2882 case Intrinsic::nvvm_ceil_f:
2883 case Intrinsic::nvvm_ceil_d:
2884 return ConstantFoldFP(
2889 case Intrinsic::nvvm_fabs_ftz:
2890 case Intrinsic::nvvm_fabs:
2891 return ConstantFoldFP(
2896 case Intrinsic::nvvm_floor_ftz_f:
2897 case Intrinsic::nvvm_floor_f:
2898 case Intrinsic::nvvm_floor_d:
2899 return ConstantFoldFP(
2904 case Intrinsic::nvvm_rcp_rm_ftz_f:
2905 case Intrinsic::nvvm_rcp_rn_ftz_f:
2906 case Intrinsic::nvvm_rcp_rp_ftz_f:
2907 case Intrinsic::nvvm_rcp_rz_ftz_f:
2908 case Intrinsic::nvvm_rcp_rm_d:
2909 case Intrinsic::nvvm_rcp_rm_f:
2910 case Intrinsic::nvvm_rcp_rn_d:
2911 case Intrinsic::nvvm_rcp_rn_f:
2912 case Intrinsic::nvvm_rcp_rp_d:
2913 case Intrinsic::nvvm_rcp_rp_f:
2914 case Intrinsic::nvvm_rcp_rz_d:
2915 case Intrinsic::nvvm_rcp_rz_f: {
2919 auto Denominator = IsFTZ ? FTZPreserveSign(APF) : APF;
2925 Res = FTZPreserveSign(Res);
2926 return ConstantFP::get(Ty, Res);
2931 case Intrinsic::nvvm_round_ftz_f:
2932 case Intrinsic::nvvm_round_f:
2933 case Intrinsic::nvvm_round_d: {
2938 auto V = IsFTZ ? FTZPreserveSign(APF) : APF;
2940 return ConstantFP::get(Ty, V);
2943 case Intrinsic::nvvm_saturate_ftz_f:
2944 case Intrinsic::nvvm_saturate_d:
2945 case Intrinsic::nvvm_saturate_f: {
2947 auto V = IsFTZ ? FTZPreserveSign(APF) : APF;
2948 if (V.isNegative() || V.isZero() || V.isNaN())
2952 return ConstantFP::get(Ty, One);
2953 return ConstantFP::get(Ty, APF);
2956 case Intrinsic::nvvm_sqrt_rn_ftz_f:
2957 case Intrinsic::nvvm_sqrt_f:
2958 case Intrinsic::nvvm_sqrt_rn_d:
2959 case Intrinsic::nvvm_sqrt_rn_f:
2962 return ConstantFoldFP(
2968 case Intrinsic::amdgcn_cos:
2969 case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
2971 if (V < -256.0 || V > 256.0)
2976 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2977 double V4 = V * 4.0;
2978 if (V4 == floor(V4)) {
2980 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2981 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2988 return GetConstantFoldFPValue(V, Ty);
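// Illustrative example (editorial): the AMDGPU sin/cos intrinsics take their
// operand in units of full turns, so when V * 4 is an exact integer the table
// above folds e.g. llvm.amdgcn.sin(0.25) to 1.0 and llvm.amdgcn.cos(0.5) to
// -1.0.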
  LibFunc Func = NotLibFunc;

  case LibFunc_acos_finite:
  case LibFunc_acosf_finite:

      return ConstantFoldFP(acos, APF, Ty);

  case LibFunc_asin_finite:
  case LibFunc_asinf_finite:

      return ConstantFoldFP(asin, APF, Ty);

      return ConstantFP::get(Ty, U);

      return ConstantFoldFP(atan, APF, Ty);

    if (TLI->has(Func)) {
3028 return ConstantFP::get(Ty, U);
3034 return ConstantFoldFP(cos, APF, Ty);
3038 case LibFunc_cosh_finite:
3039 case LibFunc_coshf_finite:
3041 return ConstantFoldFP(cosh, APF, Ty);
3045 case LibFunc_exp_finite:
3046 case LibFunc_expf_finite:
3048 return ConstantFoldFP(exp, APF, Ty);
3052 case LibFunc_exp2_finite:
3053 case LibFunc_exp2f_finite:
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);

    if (TLI->has(Func)) {
3062 return ConstantFP::get(Ty, U);
  case LibFunc_floorf:
    if (TLI->has(Func)) {

      return ConstantFP::get(Ty, U);
3074 case LibFunc_log_finite:
3075 case LibFunc_logf_finite:
3077 return ConstantFoldFP(log, APF, Ty);
3081 case LibFunc_log2_finite:
3082 case LibFunc_log2f_finite:
      return ConstantFoldFP(log2, APF, Ty);
3088 case LibFunc_log10f:
3089 case LibFunc_log10_finite:
3090 case LibFunc_log10f_finite:
3093 return ConstantFoldFP(log10, APF, Ty);
  case LibFunc_ilogbf:

      return ConstantInt::get(Ty, ilogb(APF), true);
3103 return ConstantFoldFP(logb, APF, Ty);
3106 case LibFunc_log1pf:
3109 return ConstantFP::get(Ty, U);
3111 return ConstantFoldFP(log1p, APF, Ty);
3118 return ConstantFoldFP(erf, APF, Ty);
3120 case LibFunc_nearbyint:
3121 case LibFunc_nearbyintf:
  case LibFunc_roundeven:
  case LibFunc_roundevenf:
    if (TLI->has(Func)) {

      return ConstantFP::get(Ty, U);

  case LibFunc_roundf:
    if (TLI->has(Func)) {

      return ConstantFP::get(Ty, U);
3141 return ConstantFoldFP(sin, APF, Ty);
3145 case LibFunc_sinh_finite:
3146 case LibFunc_sinhf_finite:
3148 return ConstantFoldFP(sinh, APF, Ty);
3153 return ConstantFoldFP(sqrt, APF, Ty);
3158 return ConstantFoldFP(tan, APF, Ty);
3163 return ConstantFoldFP(tanh, APF, Ty);
  case LibFunc_truncf:
    if (TLI->has(Func)) {

      return ConstantFP::get(Ty, U);
  switch (IntrinsicID) {
  case Intrinsic::bswap:
    return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
  case Intrinsic::ctpop:
    return ConstantInt::get(Ty, Op->getValue().popcount());
  case Intrinsic::bitreverse:
    return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
  case Intrinsic::amdgcn_s_wqm: {

    Val |= (Val & 0x5555555555555555ULL) << 1 |
           ((Val >> 1) & 0x5555555555555555ULL);
    Val |= (Val & 0x3333333333333333ULL) << 2 |
           ((Val >> 2) & 0x3333333333333333ULL);
    return ConstantInt::get(Ty, Val);

  case Intrinsic::amdgcn_s_quadmask: {

    for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {

        QuadMask |= (1ULL << I);

    return ConstantInt::get(Ty, QuadMask);

  case Intrinsic::amdgcn_s_bitreplicate: {

    Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
    Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
    Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
    Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
    Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
    Val = Val | Val << 1;
    return ConstantInt::get(Ty, Val);
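// Illustrative example (editorial): s_bitreplicate spreads bit i of the
// 32-bit input to bits 2*i and 2*i+1 of the 64-bit result, so 0b10 folds to
// 0b1100 and 0xFFFF folds to 0xFFFFFFFF.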
  if (Operands[0]->getType()->isVectorTy()) {

    switch (IntrinsicID) {

    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
      if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))

    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:

        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),

    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:

        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),

    case Intrinsic::wasm_anytrue:
      return Op->isNullValue() ? ConstantInt::get(Ty, 0)

    case Intrinsic::wasm_alltrue:

      for (unsigned I = 0; I != E; ++I) {

          return ConstantInt::get(Ty, 0);

      return ConstantInt::get(Ty, 1);

    if (FCmp->isSignaling()) {

  return ConstantInt::get(Call->getType()->getScalarType(), Result);
                                 const Type *RetTy) {
  assert(RetTy != nullptr);

    return ConstantFP::get(RetTy->getContext(), Ret);

  assert(!LosesInfo && "Unexpected lossy promotion");

    return ConstantFP::get(RetTy->getContext(), Ret);

  if (Next.isZero() || Next.isDenormal() || Next.isSignaling())
  LibFunc Func = NotLibFunc;

  const APFloat &Op1V = Op1->getValueAPF();
  const APFloat &Op2V = Op2->getValueAPF();

  case LibFunc_pow_finite:
  case LibFunc_powf_finite:

      return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);

    if (TLI->has(Func)) {
      APFloat V = Op1->getValueAPF();

      return ConstantFP::get(Ty, V);

  case LibFunc_remainder:
  case LibFunc_remainderf:
    if (TLI->has(Func)) {
      APFloat V = Op1->getValueAPF();

      return ConstantFP::get(Ty, V);

  case LibFunc_atan2f:

  case LibFunc_atan2_finite:
  case LibFunc_atan2f_finite:

      return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);

  case LibFunc_nextafter:
  case LibFunc_nextafterf:
  case LibFunc_nexttoward:
  case LibFunc_nexttowardf:

      return ConstantFoldNextToward(Op1V, Op2V, Ty);
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (Ty->isFloatingPointTy()) {

    switch (IntrinsicID) {
3418 case Intrinsic::maxnum:
3419 case Intrinsic::minnum:
3420 case Intrinsic::maximum:
3421 case Intrinsic::minimum:
3422 case Intrinsic::maximumnum:
3423 case Intrinsic::minimumnum:
3424 case Intrinsic::nvvm_fmax_d:
3425 case Intrinsic::nvvm_fmin_d:
3433 case Intrinsic::nvvm_fmax_f:
3434 case Intrinsic::nvvm_fmax_ftz_f:
3435 case Intrinsic::nvvm_fmax_ftz_nan_f:
3436 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3437 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3438 case Intrinsic::nvvm_fmax_nan_f:
3439 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3440 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3442 case Intrinsic::nvvm_fmin_f:
3443 case Intrinsic::nvvm_fmin_ftz_f:
3444 case Intrinsic::nvvm_fmin_ftz_nan_f:
3445 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
3446 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
3447 case Intrinsic::nvvm_fmin_nan_f:
3448 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
3449 case Intrinsic::nvvm_fmin_xorsign_abs_f:
        if (!IsOp0Undef && !IsOp1Undef)

          APInt NVCanonicalNaN(32, 0x7fffffff);
          return ConstantFP::get(
              Ty, APFloat(Ty->getFltSemantics(), NVCanonicalNaN));

        return ConstantFP::get(Ty, FTZPreserveSign(Op->getValueAPF()));
    const APFloat &Op1V = Op1->getValueAPF();

    if (Op2->getType() != Op1->getType())

    const APFloat &Op2V = Op2->getValueAPF();

    if (const auto *ConstrIntr =

      switch (IntrinsicID) {

      case Intrinsic::experimental_constrained_fadd:
        St = Res.add(Op2V, RM);

      case Intrinsic::experimental_constrained_fsub:

      case Intrinsic::experimental_constrained_fmul:

      case Intrinsic::experimental_constrained_fdiv:
        St = Res.divide(Op2V, RM);

      case Intrinsic::experimental_constrained_frem:

      case Intrinsic::experimental_constrained_fcmp:
      case Intrinsic::experimental_constrained_fcmps:
        return evaluateCompare(Op1V, Op2V, ConstrIntr);

        return ConstantFP::get(Ty, Res);
    switch (IntrinsicID) {

    case Intrinsic::copysign:

    case Intrinsic::minnum:
      return ConstantFP::get(Ty, minnum(Op1V, Op2V));
    case Intrinsic::maxnum:
      return ConstantFP::get(Ty, maxnum(Op1V, Op2V));
    case Intrinsic::minimum:
      return ConstantFP::get(Ty, minimum(Op1V, Op2V));
    case Intrinsic::maximum:
      return ConstantFP::get(Ty, maximum(Op1V, Op2V));
    case Intrinsic::minimumnum:
      return ConstantFP::get(Ty, minimumnum(Op1V, Op2V));
    case Intrinsic::maximumnum:
      return ConstantFP::get(Ty, maximumnum(Op1V, Op2V));
3529 case Intrinsic::nvvm_fmax_d:
3530 case Intrinsic::nvvm_fmax_f:
3531 case Intrinsic::nvvm_fmax_ftz_f:
3532 case Intrinsic::nvvm_fmax_ftz_nan_f:
3533 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3534 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3535 case Intrinsic::nvvm_fmax_nan_f:
3536 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3537 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3539 case Intrinsic::nvvm_fmin_d:
3540 case Intrinsic::nvvm_fmin_f:
3541 case Intrinsic::nvvm_fmin_ftz_f:
3542 case Intrinsic::nvvm_fmin_ftz_nan_f:
3543 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
3544 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
3545 case Intrinsic::nvvm_fmin_nan_f:
3546 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
3547 case Intrinsic::nvvm_fmin_xorsign_abs_f: {
      bool ShouldCanonicalizeNaNs = !(IntrinsicID == Intrinsic::nvvm_fmax_d ||
                                      IntrinsicID == Intrinsic::nvvm_fmin_d);

      APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
      APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;

      bool XorSign = false;

        XorSign = A.isNegative() ^ B.isNegative();

      bool IsFMax = false;
      switch (IntrinsicID) {
3567 case Intrinsic::nvvm_fmax_d:
3568 case Intrinsic::nvvm_fmax_f:
3569 case Intrinsic::nvvm_fmax_ftz_f:
3570 case Intrinsic::nvvm_fmax_ftz_nan_f:
3571 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3572 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3573 case Intrinsic::nvvm_fmax_nan_f:
3574 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3575 case Intrinsic::nvvm_fmax_xorsign_abs_f:
      if (ShouldCanonicalizeNaNs && Res.isNaN()) {
        APFloat NVCanonicalNaN(Res.getSemantics(), APInt(32, 0x7fffffff));
        return ConstantFP::get(Ty, NVCanonicalNaN);

      return ConstantFP::get(Ty, Res);
3594 case Intrinsic::nvvm_add_rm_f:
3595 case Intrinsic::nvvm_add_rn_f:
3596 case Intrinsic::nvvm_add_rp_f:
3597 case Intrinsic::nvvm_add_rz_f:
3598 case Intrinsic::nvvm_add_rm_d:
3599 case Intrinsic::nvvm_add_rn_d:
3600 case Intrinsic::nvvm_add_rp_d:
3601 case Intrinsic::nvvm_add_rz_d:
3602 case Intrinsic::nvvm_add_rm_ftz_f:
3603 case Intrinsic::nvvm_add_rn_ftz_f:
3604 case Intrinsic::nvvm_add_rp_ftz_f:
3605 case Intrinsic::nvvm_add_rz_ftz_f: {
3608 APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
3609 APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
3619 Res = IsFTZ ? FTZPreserveSign(Res) : Res;
3620 return ConstantFP::get(Ty, Res);
3625 case Intrinsic::nvvm_mul_rm_f:
3626 case Intrinsic::nvvm_mul_rn_f:
3627 case Intrinsic::nvvm_mul_rp_f:
3628 case Intrinsic::nvvm_mul_rz_f:
3629 case Intrinsic::nvvm_mul_rm_d:
3630 case Intrinsic::nvvm_mul_rn_d:
3631 case Intrinsic::nvvm_mul_rp_d:
3632 case Intrinsic::nvvm_mul_rz_d:
3633 case Intrinsic::nvvm_mul_rm_ftz_f:
3634 case Intrinsic::nvvm_mul_rn_ftz_f:
3635 case Intrinsic::nvvm_mul_rp_ftz_f:
3636 case Intrinsic::nvvm_mul_rz_ftz_f: {
3639 APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
3640 APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
3650 Res = IsFTZ ? FTZPreserveSign(Res) : Res;
3651 return ConstantFP::get(Ty, Res);
3656 case Intrinsic::nvvm_div_rm_f:
3657 case Intrinsic::nvvm_div_rn_f:
3658 case Intrinsic::nvvm_div_rp_f:
3659 case Intrinsic::nvvm_div_rz_f:
3660 case Intrinsic::nvvm_div_rm_d:
3661 case Intrinsic::nvvm_div_rn_d:
3662 case Intrinsic::nvvm_div_rp_d:
3663 case Intrinsic::nvvm_div_rz_d:
3664 case Intrinsic::nvvm_div_rm_ftz_f:
3665 case Intrinsic::nvvm_div_rn_ftz_f:
3666 case Intrinsic::nvvm_div_rp_ftz_f:
3667 case Intrinsic::nvvm_div_rz_ftz_f: {
3669 APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
3670 APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
3678 Res = IsFTZ ? FTZPreserveSign(Res) : Res;
3679 return ConstantFP::get(Ty, Res);
  if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())

  switch (IntrinsicID) {

  case Intrinsic::pow:
    return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
  case Intrinsic::amdgcn_fmul_legacy:

    return ConstantFP::get(Ty, Op1V * Op2V);

  switch (IntrinsicID) {
  case Intrinsic::ldexp: {
    return ConstantFP::get(

  case Intrinsic::is_fpclass: {

    return ConstantInt::get(Ty, Result);

  case Intrinsic::powi: {
    int Exp = static_cast<int>(Op2C->getSExtValue());
    switch (Ty->getTypeID()) {

      if (Ty->isHalfTy()) {

        return ConstantFP::get(Ty, Res);
  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))

    switch (IntrinsicID) {

    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:

      return ConstantInt::get(

    case Intrinsic::scmp:
    case Intrinsic::ucmp:

        return ConstantInt::get(Ty, 0);

      if (IntrinsicID == Intrinsic::scmp)
        Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
      else
        Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
      return ConstantInt::get(Ty, Res, true);
3784 case Intrinsic::usub_with_overflow:
3785 case Intrinsic::ssub_with_overflow:
3791 case Intrinsic::uadd_with_overflow:
3792 case Intrinsic::sadd_with_overflow:
3802 case Intrinsic::smul_with_overflow:
3803 case Intrinsic::umul_with_overflow: {
      switch (IntrinsicID) {

      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);

      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);

      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);

      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);

      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);

      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);

          ConstantInt::get(Ty->getContext(), Res),
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:

      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));

      return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:

      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));

      return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      if (IntrinsicID == Intrinsic::cttz)

    case Intrinsic::abs:
      assert(C1 && "Must be constant int");

      return ConstantInt::get(Ty, C0->abs());
3885 case Intrinsic::amdgcn_wave_reduce_umin:
3886 case Intrinsic::amdgcn_wave_reduce_umax:
3887 case Intrinsic::amdgcn_wave_reduce_max:
3888 case Intrinsic::amdgcn_wave_reduce_min:
3889 case Intrinsic::amdgcn_wave_reduce_and:
3890 case Intrinsic::amdgcn_wave_reduce_or:
3905 switch (IntrinsicID) {
3907 case Intrinsic::x86_avx512_vcvtss2si32:
3908 case Intrinsic::x86_avx512_vcvtss2si64:
3909 case Intrinsic::x86_avx512_vcvtsd2si32:
3910 case Intrinsic::x86_avx512_vcvtsd2si64:
3913 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3917 case Intrinsic::x86_avx512_vcvtss2usi32:
3918 case Intrinsic::x86_avx512_vcvtss2usi64:
3919 case Intrinsic::x86_avx512_vcvtsd2usi32:
3920 case Intrinsic::x86_avx512_vcvtsd2usi64:
3923 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3927 case Intrinsic::x86_avx512_cvttss2si:
3928 case Intrinsic::x86_avx512_cvttss2si64:
3929 case Intrinsic::x86_avx512_cvttsd2si:
3930 case Intrinsic::x86_avx512_cvttsd2si64:
3933 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3937 case Intrinsic::x86_avx512_cvttss2usi:
3938 case Intrinsic::x86_avx512_cvttss2usi64:
3939 case Intrinsic::x86_avx512_cvttsd2usi:
3940 case Intrinsic::x86_avx512_cvttsd2usi64:
3943 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3950 if (IntrinsicID == Intrinsic::experimental_cttz_elts) {
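// Return the index of the first non-zero element of the constant vector, or
// the element count if every element is zero.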
3955 unsigned Width = Ty->getIntegerBitWidth();
3958 for (unsigned I = 0; I < FVTy->getNumElements(); ++I) {
3959 Constant *Elt = Operands[0]->getAggregateElement(I);
3964 return ConstantInt::get(Ty, I);
3968 return ConstantInt::get(Ty, FVTy->getNumElements());
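// AMDGPU cube-map helper: cubeid selects the face index from the major axis
// of the direction vector, cubema its magnitude, and cubesc/cubetc the face
// coordinates.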
3979 APFloat MA(Sem), SC(Sem), TC(Sem);
3992 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
4014 switch (IntrinsicID) {
4017 case Intrinsic::amdgcn_cubeid:
4019 case Intrinsic::amdgcn_cubema:
4021 case Intrinsic::amdgcn_cubesc:
4023 case Intrinsic::amdgcn_cubetc:
4030 const APInt *C0, *C1, *C2;
4031 if (!getConstIntOrUndef(Operands[0], C0) ||
4032 !getConstIntOrUndef(Operands[1], C1) ||
4033 !getConstIntOrUndef(Operands[2], C2))
4040 unsigned NumUndefBytes = 0;
4041 for (unsigned I = 0; I < 32; I += 8) {
4050 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
4054 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
4056 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
4059 Val.insertBits(B, I, 8);
4062 if (NumUndefBytes == 4)
4065 return ConstantInt::get(Ty, Val);
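// Three-operand scalar folds follow: fma-family FP ops, fixed-point
// multiplies, funnel shifts, and amdgcn.perm.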
4074 assert(Operands.size() == 3 && "Wrong number of operands.");
4079 const APFloat &C1 = Op1->getValueAPF();
4080 const APFloat &C2 = Op2->getValueAPF();
4081 const APFloat &C3 = Op3->getValueAPF();
4087 switch (IntrinsicID) {
4090 case Intrinsic::experimental_constrained_fma:
4091 case Intrinsic::experimental_constrained_fmuladd:
4095 if (mayFoldConstrained(
4097 return ConstantFP::get(Ty, Res);
4101 switch (IntrinsicID) {
4103 case Intrinsic::amdgcn_fma_legacy: {
4109 return ConstantFP::get(Ty, APFloat(0.0f) + C3);
4113 case Intrinsic::fma:
4114 case Intrinsic::fmuladd: {
4117 return ConstantFP::get(Ty, V);
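// NVVM fma variants carry an explicit rounding mode (rn/rz/rm/rp) and may
// flush denormal inputs and results to zero (the _ftz forms).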
4120 case Intrinsic::nvvm_fma_rm_f:
4121 case Intrinsic::nvvm_fma_rn_f:
4122 case Intrinsic::nvvm_fma_rp_f:
4123 case Intrinsic::nvvm_fma_rz_f:
4124 case Intrinsic::nvvm_fma_rm_d:
4125 case Intrinsic::nvvm_fma_rn_d:
4126 case Intrinsic::nvvm_fma_rp_d:
4127 case Intrinsic::nvvm_fma_rz_d:
4128 case Intrinsic::nvvm_fma_rm_ftz_f:
4129 case Intrinsic::nvvm_fma_rn_ftz_f:
4130 case Intrinsic::nvvm_fma_rp_ftz_f:
4131 case Intrinsic::nvvm_fma_rz_ftz_f: {
4133 APFloat A = IsFTZ ? FTZPreserveSign(C1) : C1;
4134 APFloat B = IsFTZ ? FTZPreserveSign(C2) : C2;
4135 APFloat C = IsFTZ ? FTZPreserveSign(C3) : C3;
4145 Res = IsFTZ ? FTZPreserveSign(Res) : Res;
4146 return ConstantFP::get(Ty, Res);
4151 case Intrinsic::amdgcn_cubeid:
4152 case Intrinsic::amdgcn_cubema:
4153 case Intrinsic::amdgcn_cubesc:
4154 case Intrinsic::amdgcn_cubetc: {
4155 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
4156 return ConstantFP::get(Ty, V);
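// smul.fix(.sat): multiply the sign-extended operands at twice the width,
// arithmetic-shift right by the scale, then truncate (saturating for the
// .sat variant).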
4163 if (IntrinsicID == Intrinsic::smul_fix ||
4164 IntrinsicID == Intrinsic::smul_fix_sat) {
4165 const APInt *C0, *C1;
4166 if (!getConstIntOrUndef(Operands[0], C0) ||
4167 !getConstIntOrUndef(Operands[1], C1))
4183 assert(Scale < Width && "Illegal scale.");
4184 unsigned ExtendedWidth = Width * 2;
4186 (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
4187 if (IntrinsicID == Intrinsic::smul_fix_sat) {
4193 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
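// Funnel shifts: a shift amount of zero (mod bit width) returns the
// corresponding operand; otherwise combine C0 << ShlAmt with C1 >> LshrAmt.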
4196 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
4197 const APInt *C0, *C1, *C2;
4198 if (!getConstIntOrUndef(Operands[0], C0) ||
4199 !getConstIntOrUndef(Operands[1], C1) ||
4200 !getConstIntOrUndef(Operands[2], C2))
4203 bool IsRight = IntrinsicID == Intrinsic::fshr;
4205 return Operands[IsRight ? 1 : 0];
4214 return Operands[IsRight ? 1 : 0];
4217 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
4218 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
4220 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
4222 return ConstantInt::get(Ty, C0->shl(ShlAmt));
4223 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
4226 if (IntrinsicID == Intrinsic::amdgcn_perm)
4227 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
4243 if (Operands.size() == 1)
4244 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
4246 if (Operands.size() == 2) {
4248 ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
4249 return FoldedLibCall;
4251 return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
4254 if (Operands.size() == 3)
4255 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
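// Fixed-width vector calls: fold the whole-vector intrinsics handled below,
// then fall back to folding one lane at a time via ConstantFoldScalarCall.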
4260static Constant *ConstantFoldFixedVectorCall(
4268 switch (IntrinsicID) {
4269 case Intrinsic::masked_load: {
4270 auto *SrcPtr = Operands[0];
4271 auto *Mask = Operands[1];
4272 auto *Passthru = Operands[2];
4278 auto *MaskElt = Mask->getAggregateElement(I);
4281 auto *PassthruElt = Passthru->getAggregateElement(I);
4291 if (MaskElt->isNullValue()) {
4295 } else if (MaskElt->isOneValue()) {
4307 case Intrinsic::arm_mve_vctp8:
4308 case Intrinsic::arm_mve_vctp16:
4309 case Intrinsic::arm_mve_vctp32:
4310 case Intrinsic::arm_mve_vctp64: {
4316 for (unsigned i = 0; i < Lanes; i++) {
4326 case Intrinsic::get_active_lane_mask: {
4332 uint64_t Limit = Op1->getZExtValue();
4335 for (unsigned i = 0; i < Lanes; i++) {
4336 if (Base + i < Limit)
4345 case Intrinsic::vector_extract: {
4352 unsigned VecNumElements =
4354 unsigned StartingIndex = Idx->getZExtValue();
4357 if (NumElements == VecNumElements && StartingIndex == 0)
4360 for (unsigned I = StartingIndex, E = StartingIndex + NumElements; I < E;
4365 Result[I - StartingIndex] = Elt;
4370 case Intrinsic::vector_insert: {
4377 unsigned SubVecNumElements =
4379 unsigned VecNumElements =
4381 unsigned IdxN = Idx->getZExtValue();
4383 if (SubVecNumElements == VecNumElements && IdxN == 0)
4386 for (unsigned I = 0; I < VecNumElements; ++I) {
4388 if (I < IdxN + SubVecNumElements)
4398 case Intrinsic::vector_interleave2:
4399 case Intrinsic::vector_interleave3:
4400 case Intrinsic::vector_interleave4:
4401 case Intrinsic::vector_interleave5:
4402 case Intrinsic::vector_interleave6:
4403 case Intrinsic::vector_interleave7:
4404 case Intrinsic::vector_interleave8: {
4405 unsigned NumElements =
4407 unsigned NumOperands = Operands.size();
4408 for (unsigned I = 0; I < NumElements; ++I) {
4409 for (unsigned J = 0; J < NumOperands; ++J) {
4410 Constant *Elt = Operands[J]->getAggregateElement(I);
4413 Result[NumOperands * I + J] = Elt;
4418 case Intrinsic::wasm_dot: {
4419 unsigned NumElements =
4423 "wasm dot takes i16x8 and produces i32x4");
4424 assert(Ty->isIntegerTy());
4425 int32_t MulVector[8];
4427 for (unsigned I = 0; I < NumElements; ++I) {
4435 for (unsigned I = 0; I < Result.size(); I++) {
4436 int64_t IAdd = (int64_t)MulVector[I * 2] + (int64_t)MulVector[I * 2 + 1];
4448 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
4451 Lane[J] = Operands[J];
4455 Constant *Agg = Operands[J]->getAggregateElement(I);
4464 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
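// Scalable vectors cannot be folded lane by lane, so only a few whole-vector
// patterns (null/splat inputs, always-true lane masks) are handled here.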
4473static Constant *ConstantFoldScalableVectorCall(
4477 switch (IntrinsicID) {
4478 case Intrinsic::aarch64_sve_convert_from_svbool: {
4480 if (!Src->isNullValue())
4485 case Intrinsic::get_active_lane_mask: {
4488 if (Op0 && Op1 && Op0->getValue().uge(Op1->getValue()))
4492 case Intrinsic::vector_interleave2:
4493 case Intrinsic::vector_interleave3:
4494 case Intrinsic::vector_interleave4:
4495 case Intrinsic::vector_interleave5:
4496 case Intrinsic::vector_interleave6:
4497 case Intrinsic::vector_interleave7:
4498 case Intrinsic::vector_interleave8: {
4499 Constant *SplatVal = Operands[0]->getSplatValue();
4530 Constant *Folded = ConstantFoldScalarCall(
4537static std::pair<Constant *, Constant *>
4546 const APFloat &U = ConstFP->getValueAPF();
4549 Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);
4556 return {Result0, Result1};
4566 switch (IntrinsicID) {
4567 case Intrinsic::frexp: {
4575 for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
4576 Constant *Lane = Operands[0]->getAggregateElement(I);
4577 std::tie(Results0[I], Results1[I]) =
4578 ConstantFoldScalarFrexpCall(Lane, Ty1);
4587 auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
4592 case Intrinsic::sincos: {
4596 auto ConstantFoldScalarSincosCall =
4597 [&](Constant *Op) -> std::pair<Constant *, Constant *> {
4599 ConstantFoldScalarCall(Name, Intrinsic::sin, TyScalar, Op, TLI, Call);
4601 ConstantFoldScalarCall(Name, Intrinsic::cos, TyScalar, Op, TLI, Call);
4602 return std::make_pair(SinResult, CosResult);
4610 Constant *Lane = Operands[0]->getAggregateElement(I);
4611 std::tie(SinResults[I], CosResults[I]) =
4612 ConstantFoldScalarSincosCall(Lane);
4613 if (!SinResults[I] || !CosResults[I])
4621 auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
4622 if (!SinResult || !CosResult)
4626 case Intrinsic::vector_deinterleave2:
4627 case Intrinsic::vector_deinterleave3:
4628 case Intrinsic::vector_deinterleave4:
4629 case Intrinsic::vector_deinterleave5:
4630 case Intrinsic::vector_deinterleave6:
4631 case Intrinsic::vector_deinterleave7:
4632 case Intrinsic::vector_deinterleave8: {
4634 auto *Vec = Operands[0];
4652 for (unsigned I = 0; I != NumResults; ++I) {
4653 for (unsigned J = 0; J != NumElements; ++J) {
4666 return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
4682 return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS}, Call);
4688 bool AllowNonDeterministic) {
4689 if (Call->isNoBuiltin())
4706 Type *Ty = F->getReturnType();
4707 if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
4712 return ConstantFoldFixedVectorCall(
4713 Name, IID, FVTy, Operands, F->getDataLayout(), TLI, Call);
4716 return ConstantFoldScalableVectorCall(
4717 Name, IID, SVTy, Operands, F->getDataLayout(), TLI, Call);
4720 return ConstantFoldStructCall(Name, IID, StTy, Operands,
4721 F->getDataLayout(), TLI, Call);
4726 return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
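// isMathLibCallNoop: given constant arguments, decide whether the libm call
// is known to have no side effects (no errno, no FP exception) for those
// arguments.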
4733 if (Call->isNoBuiltin() || Call->isStrictFP())
4743 if (Call->arg_size() == 1) {
4753 case LibFunc_log10l:
4755 case LibFunc_log10f:
4756 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
4759 return !Op.isNaN() && !Op.isZero() && !Op.isInfinity();
4765 if (OpC->getType()->isDoubleTy())
4767 if (OpC->getType()->isFloatTy())
4775 if (OpC->getType()->isDoubleTy())
4777 if (OpC->getType()->isFloatTy())
4787 return !Op.isInfinity();
4791 case LibFunc_tanf: {
4794 Type *Ty = OpC->getType();
4795 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
4796 return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
4822 if (OpC->getType()->isDoubleTy())
4824 if (OpC->getType()->isFloatTy())
4831 return Op.isNaN() || Op.isZero() || !Op.isNegative();
4841 if (Call->arg_size() == 2) {
4851 case LibFunc_powf: {
4855 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
4857 return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
4865 case LibFunc_remainderl:
4866 case LibFunc_remainder:
4867 case LibFunc_remainderf:
4872 case LibFunc_atan2f:
4873 case LibFunc_atan2l:
4880 case LibFunc_nextafter:
4881 case LibFunc_nextafterf:
4882 case LibFunc_nextafterl:
4883 case LibFunc_nexttoward:
4884 case LibFunc_nexttowardf:
4885 case LibFunc_nexttowardl: {
4886 return ConstantFoldNextToward(Op0, Op1, F->getReturnType()) != nullptr;
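// Lossless inverse casts: look for InvC with CastOp(InvC) == C and record
// which nsw/nuw/nneg flags the round trip preserves.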
4901 case Instruction::BitCast:
4904 case Instruction::Trunc: {
4912 Flags->NSW = ZExtC == SExtC;
4916 case Instruction::SExt:
4917 case Instruction::ZExt: {
4921 if (!CastInvC || CastInvC != C)
4923 if (Flags && CastOp == Instruction::ZExt) {
4927 Flags->NNeg = CastInvC == SExtInvC;
4931 case Instruction::FPExt: {
4959void TargetFolder::anchor() {}
Returns the value when all bits have a known value.