#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following:
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(false));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
const SCEV *llvm::replaceSymbolicStrideSCEV(
    PredicatedScalarEvolution &PSE,
    const DenseMap<Value *, const SCEV *> &PtrToStride, Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is no entry for Ptr, its stride is not symbolic; return the
  // original expression.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Only loop-invariant SCEVUnknown strides are currently placed in the map.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  // Version the access under the assumption that the stride is one.
  ScalarEvolution *SE = PSE.getSE();
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV,
                                          SE->getOne(StrideSCEV->getType())));
  const SCEV *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still get the
      // upper and lower bounds of the interval using min/max expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(ScStart, ScEnd);
    }
  }

  // Add the size of the pointed-to element to ScEnd.
  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  Type *IdxTy = DL.getIndexType(Ptr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}
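// Worked example (illustrative): for a pointer with SCEV {%base,+,4} in a
// loop whose backedge-taken count is 99 and a 4-byte access type, the
// recorded interval is Start = %base and End = %base + 4*99 + 4, i.e.
// [%base, %base + 400) -- a half-open range suitable for overlap checks.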
void RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  if (!CanUseDiffCheck)
    return;

  // If either group contains multiple different pointers, bail out.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
    CanUseDiffCheck = false;
    return;
  }

  PointerInfo *Src = &Pointers[CGI.Members[0]];
  PointerInfo *Sink = &Pointers[CGJ.Members[0]];

  // If either pointer is both read and written, multiple checks may be
  // needed. Bail out.
  if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
      !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
    CanUseDiffCheck = false;
    return;
  }

  ArrayRef<unsigned> AccSrc =
      DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
  ArrayRef<unsigned> AccSink =
      DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
  // If either pointer is accessed multiple times, there may not be a clear
  // src/sink relation. Bail out for now.
  if (AccSrc.size() != 1 || AccSink.size() != 1) {
    CanUseDiffCheck = false;
    return;
  }
  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
  auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
  if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
      SinkAR->getLoop() != DC.getInnermostLoop()) {
    CanUseDiffCheck = false;
    return;
  }

  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
    CanUseDiffCheck = false;
    return;
  }
  const DataLayout &DL =
      SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  // Only matching constant steps matching the AllocSize are supported at the
  // moment. This simplifies the difference computation.
  auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
  if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
      Step->getAPInt().abs() != AllocSize) {
    CanUseDiffCheck = false;
    return;
  }

  IntegerType *IntTy =
      IntegerType::get(Src->PointerValue->getContext(),
                       DL.getPointerSizeInBits(CGI.AddressSpace));

  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkAR, SrcAR);

  const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
  const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt)) {
    CanUseDiffCheck = false;
    return;
  }

  const Loop *InnerLoop = SrcAR->getLoop();
  // If the start values for both Src and Sink also vary according to an outer
  // loop, it's probably better to avoid creating diff checks, because they
  // may not be hoisted; llvm::addRuntimeChecks can instead create a separate
  // outer-loop runtime check.
  if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnerLoop->getParentLoop()) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      CanUseDiffCheck = false;
      return;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
}
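// Sketch of what a diff check buys (the exact expansion is done by the
// client, e.g. the loop vectorizer): instead of testing both interval ends
// of two pointer ranges, it computes Diff = SinkStart - SrcStart once and
// verifies Diff is large enough that a vectorized iteration cannot touch
// overlapping memory -- one subtraction and compare instead of two interval
// comparisons.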
SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        tryToCreateDiffCheck(CGI, CGJ);
        Checks.push_back(std::make_pair(&CGI, &CGJ));
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}
bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Grow [Low, High) to cover the new pointer only if SCEV can prove the
  // ordering of the bounds; otherwise refuse to add it to this group.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from the equivalence classes in DepCands because:
  //  - pointers in the same class share the same underlying object, so there
  //    is a chance their bounds can be merged, and
  //  - the classes are already built such that no two pointers in the same
  //    class need a memory check between them.
  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object with a
  // non-constant difference, we shouldn't perform any pointer grouping with
  // those pointers: the resulting check could fail even when the accesses do
  // not actually overlap.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
    auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
    Iter.first->second.push_back(Index);
  }

  // Keep track of pointers we've already seen so we don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups" and
  // add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // The iteration order within an equivalence class is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Mark this pointer as seen.
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one which
        // can include this pointer; cap the total number of comparisons with
        // MemoryCheckMergeThreshold.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // was reached; create a new group to hold the current pointer.
          Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
      }
    }

    // We've computed the grouped checks for this partition; save the results.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}
/// Analyses memory accesses in a loop.
///
/// Checks whether run-time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride, bool Assume);

  /// Check whether we can check the pointers at runtime for non-intersection.
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Map of all accesses to the types used to access the pointed-to memory.
  PtrAccessMap Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  // ... (further members elided by this excerpt: ReadOnlyPtr, BAA, AST, LI,
  //      DepCands, and related bookkeeping) ...

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  bool IsRTCheckAnalysisNeeded = false;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};
/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);
  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides,
                     Value *Ptr, Type *AccessTy, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}
static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
  // we've exceeded our limit on recursion, just return whatever we have,
  // along with an indication of whether it might be a poison or undef value.
  const SCEV *Scev = SE->getSCEV(Ptr);
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    return;
  }

  Depth--;

  auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
    return get<1>(S);
  };

  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    // Not dealing with preexisting gathers yet, so no vectors.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
      break;
    }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the base or the
    // offset. Copy the SCEV across for the one without a fork in order to
    // generate the full SCEV for both sides of the GEP.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    // Find the pointer type we need to extend to.
    Type *IntPtrTy = SE->getEffectiveSCEVType(
        SE->getSCEV(GEP->getPointerOperand())->getType());

    // Find the size of the type being pointed to; with a single index term
    // (guarded above) we only need the size of the scalar value.
    const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);

    // Scale up the offsets by the size of the type, then add to the bases.
    const SCEV *Scaled1 = SE->getMulExpr(
        Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
    const SCEV *Scaled2 = SE->getMulExpr(
        Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
                          NeedsFreeze);
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
                          NeedsFreeze);
    break;
  }
  case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer so if there's another behind this
    // then we just bail out and return the generic SCEV.
    findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    } else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
    // support a single phi with two incoming values per pointer.
    if (I->getNumOperands() == 2) {
      findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
      findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    }
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    } else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the left or right
    // side. Copy the SCEV across for the one without a fork.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
        NeedsFreeze);
    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
        NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
}
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap,
                  Value *Ptr, const Loop *L) {
  ScalarEvolution *SE = PSE.getSE();
  assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);

  // For now, we will only accept a forked pointer with two possible SCEVs
  // that are either SCEVAddRecExprs or loop invariant.
  if (Scevs.size() == 2 &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
    return Scevs;
  }

  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}
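// Forked-pointer illustration (a sketch, not from this file): for
//   %p = select i1 %c, ptr %a, ptr %b
//   %gep = getelementptr double, ptr %p, i64 %iv ; followed by a load of %gep
// the two returned SCEVs are roughly {%a,+,8} and {%b,+,8}, letting LAA emit
// runtime-check bounds for both candidate objects instead of bailing out on
// the select.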
bool AccessAnalysis::createCheckForAccess(
    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
    unsigned &RunningDepId, unsigned ASId, bool ShouldCheckWrap, bool Assume) {
  Value *Ptr = Access.getPointer();

  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);

  for (auto &P : TranslatedPtrs) {
    const SCEV *PtrExpr = get<0>(P);
    if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
      return false;

    // When we run after a failing dependency check we have to make sure
    // we don't have wrapping pointers.
    if (ShouldCheckWrap) {
      // Skip wrap checking when translating pointers.
      if (TranslatedPtrs.size() > 1)
        return false;

      if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
        auto *Expr = PSE.getSCEV(Ptr);
        if (!Assume || !isa<SCEVAddRecExpr>(Expr))
          return false;
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      }
    }
    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (TranslatedPtrs.size() == 1)
      TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
                           false};
  }

  for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}
bool AccessAnalysis::canCheckPtrAtRT(
    RuntimePointerChecking &RtCheck, ScalarEvolution *SE, Loop *TheLoop,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    Value *&UncomputablePtr, bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set.
    // Also collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no
    // writes or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((AS.size() <= 1 ||
              all_of(AS,
                     [this](auto AC) {
                       MemAccessInfo AccessWrite(AC.getValue(), true);
                       return DepCands.findValue(AccessWrite) ==
                              DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.push_back({Access, AccessTy});
          CanDoAliasSetRT = false;
        }
      }
    }

    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to
    // re-try any bound checks (because then the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked: retry, this time being more aggressive about
    // adding assumptions (overflow checks), since we know the checks are
    // required.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      CanDoAliasSetRT = true;
      for (auto Retry : Retries) {
        MemAccessInfo Access = Retry.first;
        Type *AccessTy = Retry.second;
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have
  // different address spaces, assume the values aren't directly comparable.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same
  // underlying object, for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
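// Note on the retry loop above (a summary, not new behavior): the first pass
// runs createCheckForAccess with Assume=false, so no SCEV predicates are
// added speculatively; only once checks are known to be required does the
// second pass allow no-overflow assumptions for the pointers that failed.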
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.first.getPointer() << " ("
             << (A.first.getInt()
                     ? "write"
                     : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
                                                                : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result,
  // we only need to check for potential pointer dependencies within each
  // alias set.
  for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves
    // use ordered collections internally, so iteration order here is
    // deterministic.
    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const auto &AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may
        // contain both read and write, and they both need to be handled for
        // CheckDeps.
        for (const auto &AC : S) {
          if (AC.first.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.first.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them
          // in the first round (they need to be checked after we have seen
          // all write pointers).
          if (!UseDeferred && IsReadOnlyPtr) {
            // We only use the pointer keys; the types vector values don't
            // matter.
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read-only pointer - check other writes for
          // conflicts.
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          getUnderlyingObjects(Ptr, TempObjects, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values
  // that are derived from a non-wrapping induction variable because
  // non-wrapping could be flow-sensitive. Look through the potentially
  // overflowing instruction to try to prove non-wrapping for the *specific*
  // value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a
  // NSW AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}
/// If the pointer has a constant stride return it in units of the access
/// type size. Otherwise return std::nullopt.
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return std::nullopt;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return std::nullopt;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return std::nullopt;

  if (!ShouldCheckWrap)
    return Stride;

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
    return Stride;

  // An inbounds getelementptr that is an AddRec with a unit stride cannot
  // wrap per definition: if it did, the result would be poison and any
  // memory access dependent on it would be immediate UB when executed.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
      GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
    return Stride;

  // If the null pointer is undefined, then a unit-stride access sequence
  // which would otherwise reach it can be assumed not to unsigned wrap.
  unsigned AddrSpace = Ty->getPointerAddressSpace();
  if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
      (Stride == 1 || Stride == -1))
    return Stride;

  if (Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return Stride;
  }
  LLVM_DEBUG(
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
             << *Ptr << " SCEV: " << *AR << "\n");
  return std::nullopt;
}
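// Example of the returned units (illustrative): for an i32 access over
// {%A,+,8}, Size is 4 and the step is 8, so getPtrStride returns 2
// (elements, not bytes); a step of 6 would return std::nullopt because the
// remainder 6 % 4 != 0.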
std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
                                         Type *ElemTyB, Value *PtrB,
                                         const DataLayout &DL,
                                         ScalarEvolution &SE, bool StrictCheck,
                                         bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return std::nullopt;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return std::nullopt;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  int Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks
    // through `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return std::nullopt;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    const auto *Diff =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
    if (!Diff)
      return std::nullopt;
    Val = Diff->getAPInt().getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return std::nullopt;
}
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, int>;
  auto Compare = llvm::less_first();
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  int Cnt = 1;
  bool IsConsecutive = true;
  for (auto *Ptr : VL.drop_front()) {
    std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                              /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = *Diff;
    auto Res = Offsets.emplace(Offset, Cnt);
    if (!Res.second)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
    ++Cnt;
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices with the order induced by the sorted offsets.
    SortedIndices.resize(VL.size());
    Cnt = 0;
    for (const std::pair<int64_t, int> &Pair : Offsets) {
      SortedIndices[Cnt] = Pair.second;
      ++Cnt;
    }
  }
  return true;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  if (!PtrA || !PtrB)
    return false;
  Type *ElemTyA = getLoadStoreType(A);
  Type *ElemTyB = getLoadStoreType(B);
  std::optional<int> Diff =
      getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                      /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
}
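// Usage sketch (hypothetical values): for VL = {A+3, A, A+1, A+2} over i32,
// the offsets relative to VL[0] are {0, -3, -2, -1}, so sortPtrAccesses
// succeeds with SortedIndices = {1, 2, 3, 0}; isConsecutiveAccess reduces to
// "getPointersDiff(...) == 1 element".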
void MemoryDepChecker::addAccess(StoreInst *SI) {
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
                [this, SI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
                  InstMap.push_back(SI);
                  ++AccessIdx;
                });
}

void MemoryDepChecker::addAccess(LoadInst *LI) {
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
                [this, LI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
                  InstMap.push_back(LI);
                  ++AccessIdx;
                });
}
MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place; vectorizing such
  // loops can then make the code run a lot slower.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns since they can be unrolled without any extra cost.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MinDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
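// Numeric illustration (hypothetical): with TypeByteSize = 4 and a
// dependence distance of 24 bytes, VF = 8 bytes divides 24 evenly, but
// VF = 16 does not (and 24/16 = 1 iteration is far below the 8*4-byte
// threshold), so MaxVFWithoutSLForwardIssues is capped at 8 bytes -- only
// 2-element vectors keep store-to-load forwarding intact.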
/// Given a dependence-distance \p Dist between two memory accesses with the
/// same stride, check that there is no dependence within the analyzed
/// iteration space.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {
  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes, then
  // there is no dependence: the accesses never reach the same location
  // within the loop.
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // the product of BTC and Step is non-negative, so we zero extend it.
  if (DistTypeSizeBits > ProductTypeSizeBits)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (BackedgeTakenCount * Step) > 0 ?
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  return SE.isKnownPositive(Minus);
}
/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes. Returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  return ScaledDist % Stride;
}
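// Worked example (illustrative): for
//   for (i = 0; i < 1024; i += 4)
//     A[i+2] = A[i] + 1;
// the distance is 2 elements and the stride is 4, so ScaledDist = 2 and
// 2 % 4 != 0: the written lanes (2, 6, 10, ...) and read lanes (0, 4, 8, ...)
// never collide, hence the accesses are independent.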
MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const DenseMap<Value *, const SCEV *> &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  auto [APtr, AIsWrite] = A;
  auto [BPtr, BIsWrite] = B;
  Type *ATy = getLoadStoreType(InstMap[AIdx]);
  Type *BTy = getLoadStoreType(InstMap[BIdx]);

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  int64_t StrideAPtr =
      getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
  int64_t StrideBPtr =
      getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of
  // the dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(ATy, BTy);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *Dist = SE.getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                    << "(Induction step: " << StrideAPtr << ")\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
                    << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant, matching strides. We don't want to
  // vectorize "A[B[i]] += ..." and similar code, or pointer arithmetic that
  // could wrap in the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  bool HasSameSize =
      DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
  uint64_t Stride = std::abs(StrideAPtr);

  if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
      isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
                               Stride, TypeByteSize))
    return Dependence::NoDep;

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    FoundNonConstantDistanceDependence = true;
    return Dependence::Unknown;
  }

  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();

  // Attempt to prove strided accesses independent.
  if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
      areStridedAccessesIndependent(std::abs(Distance), Stride,
                                    TypeByteSize)) {
    LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    // Check if the first access writes to a location that is read in a later
    // iteration, where the distance between them is not a multiple of a
    // vector factor and relatively small.
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                      TypeByteSize) ||
         !HasSameSize)) {
      LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  if (Val == 0) {
    if (HasSameSize)
      return Dependence::Forward;
    LLVM_DEBUG(
        dbgs() << "LAA: Zero dependence difference but different type sizes\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (!HasSameSize) {
    LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
                         "different type sizes\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // The minimum distance needed for a vectorized/unrolled version is
  // TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize: the accesses of
  // the last vector iteration must not reach past the dependence distance.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the smallest
  // dependence distance seen so far.
  if (MinDistanceNeeded > MinDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes\n");
    return Dependence::Backward;
  }

  MinDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MinDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  uint64_t MinDepDistBytesOld = MinDepDistBytes;
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize)) {
    // Sanity check that we didn't update MinDepDistBytes when calling
    // couldPreventStoreLoadForward.
    assert(MinDepDistBytes == MinDepDistBytesOld &&
           "An update to MinDepDistBytes requires an update to "
           "MaxSafeVectorWidthInBits");
    (void)MinDepDistBytesOld;
    return Dependence::BackwardVectorizableButPreventsForwarding;
  }

  uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
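// Arithmetic illustration (hypothetical): with i32 accesses (TypeByteSize =
// 4), Stride = 1 and MinNumIter = 2, MinDistanceNeeded = 4*1*(2-1) + 4 = 8
// bytes. A dependence distance of 8 therefore admits 2-element vectors
// (MaxVF = 8/4 = 2, i.e. 64 bits), while a distance of 4 forces
// Dependence::Backward and blocks vectorization.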
bool MemoryDepChecker::areDepsSafe(
    DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
    const DenseMap<Value *, const SCEV *> &Strides) {

  MinDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      // Check loads only against the next equivalence class, but stores also
      // against other stores in the same equivalence class - to the same
      // address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of another equivalence class, but only the next
          // accesses of the same equivalence class.
          for (std::vector<unsigned>::iterator
                   I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                   I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}
SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
  MemAccessInfo Access(Ptr, IsWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}
2159 "NoDep",
"Unknown",
"Forward",
"ForwardButPreventsForwarding",
"Backward",
2160 "BackwardVectorizable",
"BackwardVectorizableButPreventsForwarding"};
bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  // Traverse blocks in fixed RPOT order, used by analysis below.
  LoopBlocksRPO RPOT(TheLoop);
  RPOT.perform(LI);
  for (BasicBlock *BB : RPOT) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // With both a non-vectorizable memory instruction and a convergent
      // operation, found in this loop, no reason to continue the search.
      if (HasComplexMemInst && HasConvergentOp) {
        CanVecMem = false;
        return;
      }

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // Many math library functions read the rounding mode. We will only
      // vectorize a loop if it contains known function calls that don't set
      // the flag, so it is safe to ignore this read from memory.
      auto *Call = dyn_cast<CallInst>(&I);
      if (Call && getVectorIntrinsicIDForCall(Call, TLI))
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst) {
    CanVecMem = false;
    return;
  }

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list).
  SmallSet<std::pair<Value *, Type *>, 16> Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  SmallPtrSet<Value *, 16> UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isInvariant(Ptr)) {
      // Record store instructions to loop invariant addresses.
      StoresToInvariantAddresses.push_back(ST);
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    Type *AccessTy = getLoadStoreType(ST);
    if (Seen.insert({Ptr, AccessTy}).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not
      // we need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                    [&Accesses, AccessTy, Loc](Value *Ptr) {
                      MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                      Accesses.addStore(NewLoc, AccessTy);
                    });
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the read list. If
    // we *did* see it before, then it is already in the read-write list. This
    // allows us to vectorize expressions such as A[i] += x, because the
    // address of A[i] is a read-write pointer. This only works if the index
    // of A[i] is consecutive; for A[B[i]] we may read, modify, and write a
    // few words, some to the same address.
    bool IsReadOnlyPtr = false;
    Type *AccessTy = getLoadStoreType(LD);
    if (Seen.insert({Ptr, AccessTy}).second ||
        !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)
             .value_or(0)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // See the TBAA comment in the store loop above.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
                    MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                    Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
                  });
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  Value *UncomputablePtr = nullptr;
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
                               SymbolicStrides, UncomputablePtr, false);
  if (!CanDoRTIfNeeded) {
    auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantIdentifyArrayBounds", I)
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      UncomputablePtr = nullptr;
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
          *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
        recordAnalysis("CantCheckMemDepsAtRunTime", I)
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    CanVecMem = false;
    return;
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else
    emitUnsafeDependenceRemark();
}
void LoopAccessInfo::emitUnsafeDependenceRemark() {
  auto Deps = getDepChecker().getDependences();
  if (!Deps)
    return;
  auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
    return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
           MemoryDepChecker::VectorizationSafetyStatus::Safe;
  });
  if (Found == Deps->end())
    return;
  MemoryDepChecker::Dependence Dep = *Found;

  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

  // Emit a remark for the first unsafe dependence.
  bool HasForcedDistribution = false;
  std::optional<const MDOperand *> Value =
      findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
  if (Value) {
    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  const std::string Info =
      HasForcedDistribution
          ? "unsafe dependent memory operations in loop."
          : "unsafe dependent memory operations in loop. Use "
            "#pragma loop distribute(enable) to allow loop distribution "
            "to attempt to isolate the offending operations into a separate "
            "loop";
  OptimizationRemarkAnalysis &R =
      recordAnalysis("UnsafeDep", Dep.getDestination(*this)) << Info;

  switch (Dep.Type) {
  case MemoryDepChecker::Dependence::NoDep:
  case MemoryDepChecker::Dependence::Forward:
  case MemoryDepChecker::Dependence::BackwardVectorizable:
    llvm_unreachable("Unexpected dependence");
  case MemoryDepChecker::Dependence::Backward:
    R << "\nBackward loop carried data dependence.";
    break;
  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
    R << "\nForward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
    R << "\nBackward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::Unknown:
    R << "\nUnknown data dependence.";
    break;
  }

  if (Instruction *I = Dep.getSource(*this)) {
    DebugLoc SourceLoc = I->getDebugLoc();
    if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
      SourceLoc = DD->getDebugLoc();
    if (SourceLoc)
      R << " Memory location is the same as accessed at "
        << ore::NV("Location", SourceLoc);
  }
}
OptimizationRemarkAnalysis &
LoopAccessInfo::recordAnalysis(StringRef RemarkName, Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}
bool LoopAccessInfo::isInvariant(Value *V) const {
  auto *SE = PSE->getSE();
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);
  return SE->isLoopInvariant(S, TheLoop);
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP
    // we can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}
/// If a value has only one user that is a CastInst, return it.
static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}
/// Get the stride of a pointer access in a loop. Looks for a symbolic stride
/// "a[i*stride]". Returns the symbolic stride, or nullptr otherwise.
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
                                        Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually to the
  // memory) simpler.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  // If the pointer is invariant then there is no stride and it makes no
  // sense to add predicates.
  if (Lp != S->getLoop())
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size-of-access multiplication if we are still analyzing
  // the pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Note that the restrictions after this point are only profitability
  // restrictions.
  if (!SE->isLoopInvariant(V, Lp))
    return nullptr;

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U) {
    const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
    if (!C)
      return nullptr;
    U = dyn_cast<SCEVUnknown>(C->getOperand());
    if (!U)
      return nullptr;

    // Match legacy behavior - this is not needed for correctness.
    if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
      return nullptr;
  }

  return V;
}
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  // Note: getStrideFromPointer is a *profitability* heuristic. We could
  // broaden its scope, but doing so could cause us to version loops on
  // strides that never benefit from the Stride == 1 fast path.
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr
                    << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count: such a predicate would effectively optimize a
  // single- or zero-iteration loop, as Trip-Count <= Stride == 1.
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // the backedge-taken count is non-negative, so we zero extend it.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking
  // "Stride >= TripCount" is equivalent to checking
  // "Stride - BETakenCount > 0".
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop()) {
    analyzeLoop(AA, LI, TLI, DT);
  }
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    const MemoryDepChecker &DC = getDepChecker();
    if (!DC.isSafeForAnyVectorWidth())
      OS << " with a maximum safe vector width of "
         << DC.getMaxSafeVectorWidthInBits() << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}
const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
  auto I = LoopAccessInfoMap.insert({&L, nullptr});

  if (I.second)
    I.first->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);

  return *I.first->second;
}