#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
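// foldConstVectorToAPInt packs the integer elements of a constant vector into
// a single wide APInt, honoring the target's endianness: bitcasting
// <2 x i16> <i16 1, i16 2> to i32 yields 0x00020001 on a little-endian
// target, since the low-order element must land in the low bits.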
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }
  return nullptr;
}

static Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");
  // Handle a vector-to-scalar integer/fp cast.
  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
    Type *SrcEltTy = VTy->getElementType();
    // ...
    APInt Result(DL.getTypeSizeInBits(DestTy), 0);
    if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                              SrcEltTy, NumSrcElts, DL))
      return CE;

    if (isa<IntegerType>(DestTy))
      return ConstantInt::get(DestTy, Result);
    // ...
  }
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the rest of the code can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();
  if (!isa<ConstantVector>(C) &&
      !isa<ConstantDataVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // Both input and output are integer vectors of the same total size but with
  // different element counts; do the conversion, which depends on endianness.
  bool isLittleEndian = DL.isLittleEndian();
  SmallVector<Constant *, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt / NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
                                      DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
        assert(Elt && "Constant folding cannot fail on plain integers");
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }
  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt / NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);
    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
      // Truncate and remember this piece.
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
    }
  }
  return ConstantVector::get(Result);
}
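// IsConstantOffsetFromGlobal decides whether a constant is a global value
// plus a fixed byte offset, looking through ptrtoint/bitcast and accumulating
// GEP offsets along the way.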
  // Trivial case: the constant is the global itself.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    // ...
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);
  // ...
  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
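// ConstantFoldLoadThroughBitcast folds a load of DestTy through a pointer
// bitcast by repeatedly either casting the whole constant (when the sizes
// match and the cast is legal) or descending into the leading element of an
// aggregate initializer.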
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;
    // ...
    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (SrcSize == DestSize /* ... */) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;
      // ...
    }
    // ...
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
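// ReadDataFromGlobal serializes a constant initializer into a raw byte
// buffer, switching on the constant's kind (integer, FP, struct,
// array/vector, inttoptr expression). For example, reading i32 0x01020304 on
// a little-endian target yields the bytes 04 03 02 01.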
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth() / 8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBitsAsZExtValue(8, n * 8);
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }
  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index).getFixedValue();
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the access is to the element itself and not to tail padding, read
      // the bytes from the element.
      uint64_t EltSize =
          DL.getTypeAllocSize(CS->getOperand(Index)->getType());
      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;
      // If we just read the last struct element, we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all the bytes we needed from this element, we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index).getFixedValue();
      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
  }
  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // Bail out for non-byte-sized vector elements; the inter-element layout
      // is not modeled here.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }
  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
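// FoldReinterpretLoadFromConst folds a load of LoadTy at a byte offset into a
// constant by serializing up to 32 bytes of the initializer with
// ReadDataFromGlobal and reassembling them into an integer; e.g. an i16 load
// at offset 2 of a global [4 x i8] c"\01\02\03\04" yields 0x0403 on a
// little-endian target.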
  // Bail out early; loads from scalable global variables are not expected.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly, but we can try
  // folding it as an equally-sized integer load and bitcasting the result.
  // This can be useful for union cases.
  if (!IntType) {
    // ...
    Type *MapTy = Type::getIntNTy(
        C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, convert to a vector of integers first,
        // then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        // ...
        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  // ...

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;
  // ...

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
  if (NBytes > UINT16_MAX)
    // We're scanning a large initializer; bail out for efficiency reasons.
    return nullptr;

  SmallVector<unsigned char, 4> RawBytes(size_t(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;
/// If this Offset points exactly to the start of an aggregate element, return
/// that element; otherwise return nullptr.
  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}
  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;
  // ...

  // If this is a load from a constant global, fold it.
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;
  // ...
  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // ...
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}

// SymbolicallyEvaluateBinop: fold binops whose operands are constant
// expressions rather than plain constants.
  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
    // ...
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    // ...
    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
    // ...
  }
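// CastGEPIndices rewrites GEP indices whose type differs from the pointer
// index type, so the rest of the GEP folding logic can assume canonical
// index types.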
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, bool InBounds,
                         std::optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant *, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      // ...
    }
    // ...
  }
  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, InBounds, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}
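// SymbolicallyEvaluateGEP folds a GEP with constant operands into either an
// inttoptr of a literal address or a canonical GEP on the underlying global,
// collapsing nested GEPs into a single byte offset first.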
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->isInBounds(), GEP->getInRangeIndex(),
                                   DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that we
  // eliminate over-indexing of the notional static type array bounds.
  // For GEPs of GlobalValues, use the value type.
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  // ...

  Type *ElemTy = SrcElemTy;
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  // ...

  // Try to add additional zero indices to reach the desired result element
  // type.
  while (ElemTy != ResElemTy) {
    // ...
  }
  // ...

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  std::optional<unsigned> InRangeIndex;
  if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }
  // ...
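// ConstantFoldInstOperandsImpl is the central dispatch: given an instruction
// or constant expression and its already-folded operands, it routes to the
// binary-op, compare, GEP, cast, call, and aggregate folders.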
  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating-point instructions separately to account for the
      // function's denormal mode, which is only known for real instructions.
      if (const auto *I = dyn_cast<Instruction>(InstOrCE))
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
      break;
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0],
                                             Ops[1], DL, TLI);
    return CE->getWithOperands(Ops);
  }

  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res =
            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}
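// ConstantFoldInstruction handles PHIs specially: a PHI folds only if every
// non-undef incoming value folds to one common constant.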
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it.  Note that while we
      // could skip the value if it is equal to the phi node itself we choose
      // not to because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }
    // ...
  }

  // Scan the operand list, checking that they are all constants; if so, hand
  // off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }
  // ...
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}
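// ConstantFoldCompareInstOperands evaluates icmp/fcmp on constants. Besides
// direct evaluation, it looks through ptrtoint/inttoptr casts and folds
// pointer comparisons with a common stripped base into offset comparisons.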
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    // fold: icmp (inttoptr x), null -> icmp x, 0
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
      // Convert the integer value to the right size to ensure we get the
      // proper extension or truncation.
      // ...
    }

    // Only do this transformation if the int is intptrty in size, otherwise
    // there is a truncation or extension that we aren't modeling.
    if (CE0->getOpcode() == Instruction::PtrToInt) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
      if (CE0->getType() == IntPtrTy) {
        // ...
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
          // ...
        }

        // Only do this transformation if the int is intptrty in size.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2.
    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);
    Value *Stripped0 =
        Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
    APInt Offset1(IndexWidth, 0);
    Value *Stripped1 =
        Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
    if (Stripped0 == Stripped1)
      return ConstantInt::getBool(
          Ops0->getContext(),
          ICmpInst::compare(Offset0, Offset1,
                            ICmpInst::getSignedPredicate(Predicate)));
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }
  // ConstantFoldBinaryOpOperands: try the symbolic folder first when a
  // constant expression is involved.
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;
  // ...

  // FlushFPConstant: flushing denormals depends on the enclosing function's
  // denormal mode, so it requires a real instruction context.
  if (!I || !I->getParent() || !I->getFunction())
    return Operand;

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  if (!CFP)
    return Operand;
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair.  This requires
      // knowing the width of a pointer, so it can't be done in
      // ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
                                              DL.getIntPtrType(CE->getType()),
                                              /*IsSigned=*/false, DL);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy),
                  Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantFoldIntegerCast(FoldedValue, DestTy, /*IsSigned=*/false,
                                       DL);
      }
    }
    break;
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr-to-ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    break;
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
  // ...

// ConstantFoldIntegerCast:
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;
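// canConstantFoldCallTo whitelists the intrinsics and libm functions the
// folder understands: intrinsics that cannot set errno or raise FP
// exceptions are always foldable, FP intrinsics only outside strictfp
// functions, and libcalls are matched by name below.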
bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not set errno.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
  case Intrinsic::amdgcn_s_wqm:
  case Intrinsic::amdgcn_s_quadmask:
  case Intrinsic::amdgcn_s_bitreplicate:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;
  // Floating point operations cannot be folded in strictfp functions in
  // general case. They can be folded if FP environment is known to compiler.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::exp10:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::ldexp:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::frexp:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();
  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  // Non-constrained variants of rounding operations mean the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic:
    break;
  }
  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal
  // to "cos", but has length 8.
  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for the various function names that get used for the math
    // functions when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}
Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue(Result, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue(Result, Ty);
}
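// constantFoldVectorReduce folds the llvm.vector.reduce.* family by walking
// the element constants and accumulating with the matching APInt operation;
// e.g. vector_reduce_add on <4 x i32> <i32 1, i32 2, i32 3, i32 4> folds to
// i32 10.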
  // All integer reductions of a zero input produce zero.
  if (isa<ConstantAggregateZero>(Op))
    return ConstantInt::get(VT->getElementType(), 0);

  // This is the same as the underlying binops - poison propagates.
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
    return PoisonValue::get(VT->getElementType());

  // TODO: Handle undef.
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::vector_reduce_and:
      Acc = Acc & X;
      break;
    case Intrinsic::vector_reduce_or:
      Acc = Acc | X;
      break;
    case Intrinsic::vector_reduce_xor:
      Acc = Acc ^ X;
      break;
    case Intrinsic::vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }
  return ConstantInt::get(Op->getContext(), Acc);
}
/// Attempt to fold an SSE floating-point-to-integer conversion of a constant.
/// If roundTowardZero is false, the default IEEE rounding (to nearest, ties
/// to even) is used, matching the non-truncating SSE instructions in the
/// default rounding mode.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth, IsSigned,
                           mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  // Otherwise, widen to double first.
  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

// mayFoldConstrained: a constrained FP operation may be folded if it raised
// no exception, or if raised exceptions are permitted to be lost.
  if (St == APFloat::opStatus::opOK)
    return true;
  // If evaluation raised an FP exception, the result can depend on the
  // rounding mode; if that is dynamic, folding is not possible.
  if (ORM && *ORM == RoundingMode::Dynamic)
    return false;
  // If FP exceptions are ignored, fold the call even if it raised one.
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
    return true;
// ...
// getEvaluationRoundingMode:
  if (!ORM || *ORM == RoundingMode::Dynamic)
    // Even with an unknown rounding mode, try to evaluate the operation; if
    // no inexact exception is raised, the result does not depend on rounding.
    return RoundingMode::NearestTiesToEven;
  // constantFoldCanonicalize: normal and infinite values are already canonical.
  if (Src.isNormal() || Src.isInfinity())
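// ConstantFoldScalarCall1 folds the one-operand intrinsics. It dispatches on
// the operand's kind: poison/undef and null-pointer special cases first, then
// ConstantFP operands, then ConstantInt operands, then vector operands.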
  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }

  if (isa<PoisonValue>(Operands[0])) {
    // TODO: All of these operations should probably propagate poison.
    if (IntrinsicID == Intrinsic::canonicalize)
      return PoisonValue::get(Ty);
  }

  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet placed in a basic block (e.g. when
      // cloning a function during inlining), Call's caller may not be
      // available, so check Call's parent block first.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      // ...
    }
  }
  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());
      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;

      if (U.isNaN())
        return nullptr;

      unsigned Width = Ty->getIntegerBitWidth();
      APSInt Int(Width, !Signed);
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

      return nullptr;
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already has the desired saturation semantics.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);
    // ...
    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // The v_fract instruction behaves like the OpenCL spec, which defines
      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
      // there to prevent fract(-small) from returning 1.0. It returns the
      // largest positive floating-point number less than 1.0."
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }
    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.
    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }
    // APFloat versions of these functions do not exist yet, so we use the
    // host's native double versions instead.
    const APFloat &APF = Op->getValueAPF();
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::exp10:
      // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside the range
        // [-256, 256] differently. This is a rare case, so bail out rather
        // than trying to handle the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = {0.0, 1.0, 0.0, -1.0};
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      }
      // ...
      return GetConstantFoldFPValue(V, Ty);
    }
    }
    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    if (!TLI->getLibFunc(Name, Func))
      return nullptr;

    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log2, APF, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (!APF.isNegative() && TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }
  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().popcount());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status != APFloat::opInexact && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    case Intrinsic::amdgcn_s_wqm: {
      uint64_t Val = Op->getZExtValue();
      Val |= (Val & 0x5555555555555555ULL) << 1 |
             ((Val >> 1) & 0x5555555555555555ULL);
      Val |= (Val & 0x3333333333333333ULL) << 2 |
             ((Val >> 2) & 0x3333333333333333ULL);
      return ConstantInt::get(Ty, Val);
    }
    case Intrinsic::amdgcn_s_quadmask: {
      uint64_t Val = Op->getZExtValue();
      uint64_t QuadMask = 0;
      for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
        if (!(Val & 0xF))
          continue;
        QuadMask |= (1ULL << I);
      }
      return ConstantInt::get(Ty, QuadMask);
    }
    case Intrinsic::amdgcn_s_bitreplicate: {
      uint64_t Val = Op->getZExtValue();
      Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
      Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
      Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
      Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
      Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
      Val = Val | Val << 1;
      return ConstantInt::get(Ty, Val);
    }
    default:
      return nullptr;
    }
  }
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
      return C;
    break;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    }
  }

  return nullptr;
}
// evaluateCompare folds a constrained fcmp, accounting for signaling
// semantics.
  APFloat::opStatus St = APFloat::opOK;
  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  FCmpInst::Predicate Cond = FCmp->getPredicate();
  if (FCmp->isSignaling()) {
    if (Op1.isNaN() || Op2.isNaN())
      St = APFloat::opInvalidOp;
  } else {
    if (Op1.isSignaling() || Op2.isSignaling())
      St = APFloat::opInvalidOp;
  }
  bool Result = FCmpInst::compare(Op1, Op2, Cond);
  if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
    return ConstantInt::get(Call->getType()->getScalarType(), Result);
  return nullptr;
}
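// ConstantFoldScalarCall2 folds the two-operand intrinsics and libcalls,
// splitting on whether the operands are two FP constants, an FP constant
// plus an integer (ldexp, is.fpclass, powi), or two integers (min/max,
// overflow and saturating arithmetic, cttz/ctlz, abs).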
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (Ty->isFloatingPointTy()) {
    // TODO: We should have undef handling for all of the FP intrinsics that
    //       are attempted to be folded in this function.
    bool IsOp0Undef = isa<UndefValue>(Operands[0]);
    bool IsOp1Undef = isa<UndefValue>(Operands[1]);
    switch (IntrinsicID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
      // If one argument is undef, return the other argument.
      if (IsOp0Undef)
        return Operands[1];
      if (IsOp1Undef)
        return Operands[0];
      break;
    }
  }
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
        APFloat Res = Op1V;
        APFloat::opStatus St;
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fsub:
          St = Res.subtract(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fmul:
          St = Res.multiply(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_frem:
          St = Res.mod(Op2V);
          break;
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        }
        if (mayFoldConstrained(
                const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
          return ConstantFP::get(Ty->getContext(), Res);
        return nullptr;
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::copysign:
        return ConstantFP::get(Ty->getContext(),
                               APFloat::copySign(Op1V, Op2V));
      case Intrinsic::minnum:
        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
      case Intrinsic::maxnum:
        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
      case Intrinsic::minimum:
        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
      case Intrinsic::maximum:
        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
      }

      // ...
      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        // The legacy behaviour is that multiplying +/-0.0 by anything, even
        // NaN or infinity, gives +0.0.
        if (Op1V.isZero() || Op2V.isZero())
          return ConstantFP::getZero(Ty);
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      if (!TLI->getLibFunc(Name, Func))
        return nullptr;

      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
        // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
        // implementations. Not all of those guarantee a well-defined result,
        // so bail out for those cases.
        if (Op1V.isZero() && Op2V.isZero())
          return nullptr;
        [[fallthrough]];
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::ldexp: {
        return ConstantFP::get(
            Ty->getContext(),
            scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
      }
      case Intrinsic::is_fpclass: {
        // ...
      }
      }

      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow(Op1V.convertToDouble(),
                                     (int)Op2C->getZExtValue())));
    }
    return nullptr;
  }
  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
      return ConstantInt::get(
          Ty, ICmpInst::compare(*C0, *C1,
                                MinMaxIntrinsic::getPredicate(IntrinsicID))
                  ? *C0
                  : *C1);

    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
      // X - undef -> { 0, false }
      // undef - X -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      [[fallthrough]];
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X + undef -> { -1, false }
      // undef + x -> { -1, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {Constant::getAllOnesValue(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      [[fallthrough]];
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
          ConstantInt::get(Ty->getContext(), Res),
          ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)};
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");
      // cttz(0, 1) and ctlz(0, 1) are poison.
      if (C1->isOne() && (!C0 || C0->isZero()))
        return PoisonValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countr_zero());
      return ConstantInt::get(Ty, C0->countl_zero());
    case Intrinsic::abs:
      assert(C1 && "Must be constant int");
      // Undef or minimum-signed-value operand with poison min --> undef.
      if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
        return UndefValue::get(Ty);
      // Undef operand with no poison min --> 0 (sign bit must be clear).
      if (!C0)
        return Constant::getNullValue(Ty);
      return ConstantInt::get(Ty, C0->abs());
    case Intrinsic::amdgcn_wave_reduce_umin:
    case Intrinsic::amdgcn_wave_reduce_umax:
      return dyn_cast<Constant>(Operands[0]);
    }

    return nullptr;
  }
  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ false);
      break;
    }
  }
  return nullptr;
}
  // ConstantFoldAMDGCNCubeIntrinsic: select the dominant axis of (S0, S1, S2).
  } else if (abs(S1) >= abs(S0)) {
    // ...
  switch (IntrinsicID) {
  default:
    llvm_unreachable("unhandled amdgcn cube intrinsic");
  case Intrinsic::amdgcn_cubeid:
    return APFloat(Sem, ID);
  case Intrinsic::amdgcn_cubema:
    return MA + MA;
  case Intrinsic::amdgcn_cubesc:
    return SC;
  case Intrinsic::amdgcn_cubetc:
    return TC;
  }
}

static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
                                                 Type *Ty) {
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;
  APInt Val(32, 0);
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
    unsigned B = 0;
    // ...
    const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
    if (!Src)
      ++NumUndefBytes;
    else if (Sel < 8)
      B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
    else
      B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    return UndefValue::get(Ty);

  return ConstantInt::get(Ty, Val);
}
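// ConstantFoldScalarCall3 folds the three-operand intrinsics: constrained and
// plain fma/fmuladd, the AMDGPU cube intrinsics, fixed-point smul.fix[.sat],
// and the funnel shifts fshl/fshr.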
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
          APFloat Res = C1;
          APFloat::opStatus St;
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }

        switch (IntrinsicID) {
        default:
          break;
        case Intrinsic::amdgcn_fma_legacy: {
          // The legacy behaviour is that multiplying +/-0.0 by anything, even
          // NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            // It's tempting to just return C3 here, but that would give the
            // wrong result if C3 was -0.0.
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          [[fallthrough]];
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }
  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // undef * C -> 0, C * undef -> 0
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    // This code performs rounding towards negative infinity in case the
    // result cannot be represented exactly for the given scale, matching
    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }
  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to an oversized inverse shift.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
}
static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID, Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}
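// ConstantFoldFixedVectorCall folds vector intrinsics. A few (masked.load,
// the MVE vctp family, get.active.lane.mask) are handled as whole vectors;
// everything else is folded lane by lane through ConstantFoldScalarCall.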
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  // ...
  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    // ...
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        // An undef mask bit may take either the passthru or the loaded lane.
        // ...
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      }
    }
    // ...
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }
  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // ...
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;
    // ...
  }
  default:
    break;
  }
  return nullptr;
}
static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);

  // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
  // using undef.
  Constant *Result1 = FrexpMant.isFinite()
                          ? ConstantInt::getSigned(IntTy, FrexpExp)
                          : ConstantInt::getNullValue(IntTy);
  return {Result0, Result1};
}
  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    Type *Ty0 = StTy->getContainedType(0);
    Type *Ty1 = StTy->getContainedType(1)->getScalarType();

    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
      SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());

      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        Constant *Lane = Operands[0]->getAggregateElement(I);
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);
        if (!Results0[I])
          return nullptr;
      }

      return ConstantStruct::get(StTy, ConstantVector::get(Results0),
                                 ConstantVector::get(Results1));
    }

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
    if (!Result0)
      return nullptr;
    return ConstantStruct::get(StTy, Result0, Result1);
  }
  default:
    // TODO: Constant folding of vector intrinsics that fall through here does
    // not work (e.g. overflow intrinsics).
    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
  }
}
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;

  // If this is not an intrinsic and not recognized as a library call, bail
  // out.
  Intrinsic::ID IID = F->getIntrinsicID();
  // ...
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, IID, FVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, IID, SVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  // TODO: If this is a library function, we already discovered that above,
  //       so we should pass the LibFunc, not the name.
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
}
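// isMathLibCallNoop reports whether a libm call with constant arguments is
// known not to set errno or raise an FP exception, so callers may treat it as
// dead when the result is unused; e.g. sqrt(4.0) is a no-op, sqrt(-1.0) is
// not.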
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;
      // ... (exp2 is handled analogously with its own bounds)

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }
      // ...
      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();
      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        break;
      }
      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) is well-defined, POSIX
        // and C11 say a domain error may occur, so allow for that possibility.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
}
void TargetFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
amode Optimize addressing mode
mir Rename Register Operands
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static constexpr uint32_t Opcode
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
bool isPosInfinity() const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
bool isNegInfinity() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static bool isDesirableCastOp(unsigned Opcode)
Whether creating a constant expression for this cast is desirable.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< unsigned > InRangeIndex=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getCompare(unsigned short pred, Constant *C1, Constant *C2, bool OnlyIfReduced=false)
Return an ICmp or FCmp comparison operator constant expression.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper for a function that represents a value that functionally represents the original function.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Type * getSourceElementType() const
std::optional< unsigned > getInRangeIndex() const
Returns the offset of the index with an inrange attachment, or std::nullopt if none.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
unsigned getElementContainingOffset(uint64_t FixedOffset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
TypeSize getElementOffset(unsigned Idx) const
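A sketch of how the two queries combine to locate the field holding a byte offset (assumes a sized, non-scalable struct; names illustrative):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include <utility>
using namespace llvm;

// Map a byte offset into STy to (field index, offset within that field).
static std::pair<unsigned, uint64_t>
fieldAtOffset(const DataLayout &DL, StructType *STy, uint64_t Off) {
  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned Idx = SL->getElementContainingOffset(Off);
  uint64_t FieldStart = SL->getElementOffset(Idx).getFixedValue();
  return {Idx, Off - FieldStart};
}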
Class to represent struct types.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
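The two calls are typically chained: map a name to a LibFunc enumerator, then ask whether the target actually provides it. A sketch:

#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

// True if "sqrt" is both recognized and available on the current target.
static bool sqrtIsAvailable(const TargetLibraryInfo &TLI) {
  LibFunc LF;
  return TLI.getLibFunc("sqrt", LF) && TLI.has(LF);
}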
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has an IEEE-compatible layout as defined b...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
LLVMContext & getContext() const
All values hold a context through their type.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
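Signedness lives in the operation rather than in the APInt itself, so the same bit pattern orders differently under the signed and unsigned variants; a small sketch:

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void minMaxDemo() {
  APInt A(8, 0x80); // -128 as signed, 128 as unsigned
  APInt B(8, 0x01);
  const APInt &S = APIntOps::smax(A, B); // B: 1 > -128 when signed
  const APInt &U = APIntOps::umax(A, B); // A: 128 > 1 when unsigned
  (void)S; (void)U;
}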
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ CE
Windows NT (Windows on ARM)
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
std::error_code status(const Twine &path, file_status &result, bool follow=true)
Get file status as if by POSIX stat().
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - try to cast a constant to the destination type, returning null if unsuccess...
static double log2(double V)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I)
Attempt to constant fold a floating point binary operation with the specified operands,...
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
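A sketch of the decomposition (names illustrative; C should be a pointer-typed constant so the index-width query makes sense):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
using namespace llvm;

// On success GV points at the base global and Off holds the byte offset.
static bool splitGlobalPlusOffset(Constant *C, const DataLayout &DL,
                                  GlobalValue *&GV, APInt &Off) {
  Off = APInt(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return IsConstantOffsetFromGlobal(C, GV, Off, DL);
}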
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
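A minimal sketch folding `icmp ult i32 3, 5` down to `i1 true` (assumes a DataLayout is at hand, e.g. from the enclosing Module):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static Constant *foldUltExample(LLVMContext &Ctx, const DataLayout &DL) {
  Constant *LHS = ConstantInt::get(Type::getInt32Ty(Ctx), 3);
  Constant *RHS = ConstantInt::get(Type::getInt32Ty(Ctx), 5);
  // Returns null if the comparison cannot be folded.
  return ConstantFoldCompareInstOperands(CmpInst::ICMP_ULT, LHS, RHS, DL);
}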
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of the C standard library function of the same name.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE maxNum semantics.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating-point constant according to the denormal mode set in the instruction's parent func...
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
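For example, folding `trunc i64 511 to i8` yields `i8 -1`; a sketch:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static Constant *foldTruncExample(LLVMContext &Ctx, const DataLayout &DL) {
  Constant *C = ConstantInt::get(Type::getInt64Ty(Ctx), 511); // 0x1ff
  // Null is returned when the cast cannot be folded.
  return ConstantFoldCastOperand(Instruction::Trunc, C, Type::getInt8Ty(Ctx), DL);
}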
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
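A sketch folding a plain integer add (the opcode can be any Instruction::BinaryOps value):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static Constant *foldAddExample(LLVMContext &Ctx, const DataLayout &DL) {
  Constant *A = ConstantInt::get(Type::getInt32Ty(Ctx), 40);
  Constant *B = ConstantInt::get(Type::getInt32Ty(Ctx), 2);
  return ConstantFoldBinaryOpOperands(Instruction::Add, A, B, DL); // i32 42, or null
}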
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE minNum semantics.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
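When every bit of a value is known, the value is effectively a constant; a sketch combining this with KnownBits::isConstant/getConstant (documented further below):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
#include <optional>
using namespace llvm;

// Returns the constant value of integer-typed V if all bits are provable.
static std::optional<APInt> tryKnownConstant(const Value *V,
                                             const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  if (Known.isConstant())
    return Known.getConstant();
  return std::nullopt;
}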
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
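A sketch reading through a constant pointer at a fixed byte offset (returns null when the bytes are not determinable, e.g. the global has no definitive initializer; names illustrative):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

static Constant *loadAtOffset(Constant *PtrC, Type *Ty, uint64_t Bytes,
                              const DataLayout &DL) {
  APInt Off(DL.getIndexTypeSizeInBits(PtrC->getType()), Bytes);
  return ConstantFoldLoadFromConstPtr(PtrC, Ty, Off, DL);
}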
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 minimum semantics.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
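A sketch: this one entry point covers all three integer casts, choosing sext vs zext from IsSigned when widening and trunc when narrowing:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
using namespace llvm;

// Sign-extend (or truncate) integer constant C to i64.
static Constant *toI64Signed(Constant *C, LLVMContext &Ctx,
                             const DataLayout &DL) {
  return ConstantFoldIntegerCast(C, Type::getInt64Ty(Ctx), /*IsSigned=*/true, DL);
}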
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
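FP folding must respect the function-level denormal environment; a sketch checking whether float denormals get non-IEEE treatment (uses Function::getDenormalMode, listed above):

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/Function.h"
using namespace llvm;

static bool floatDenormalsNonIEEE(const Function &F) {
  DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
  return Mode != DenormalMode::getIEEE();
}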
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.