#define DEBUG_TYPE "atomic-expand"

class AtomicExpandImpl {
  // Report a fatal diagnostic and delete the atomic instruction we failed to
  // lower, forwarding the diagnostic to DiagnosticInst when one is provided.
  void handleFailure(Instruction &FailedInst, const Twine &Msg,
                     Instruction *DiagnosticInst = nullptr) {
    LLVMContext &Ctx = FailedInst.getContext();
    Ctx.emitError(DiagnosticInst ? DiagnosticInst : &FailedInst, Msg);
    if (!FailedInst.getType()->isVoidTy())
      FailedInst.replaceAllUsesWith(PoisonValue::get(FailedInst.getType()));
    FailedInst.eraseFromParent();
  }
  template <typename Inst>
  void handleUnsupportedAtomicSize(Inst *I, const Twine &AtomicOpName,
                                   Instruction *DiagnosticInst = nullptr);
  bool tryInsertTrailingSeqCstFence(Instruction *AtomicI);
  template <typename AtomicInst>
  bool tryInsertFencesForAtomic(AtomicInst *AtomicI, bool OrderingRequiresFence,
                                bool ShouldInsertFences);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  Value *insertRMWCmpXchgLoop(
      IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder, SyncScope::ID SSID, bool IsVolatile,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *SI);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I,
                                const Twine &AtomicOpName = "cmpxchg",
                                Instruction *DiagnosticInst = nullptr);
  bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                CreateCmpXchgInstFun CreateCmpXchg);
  // ... (remaining member declarations elided in this listing)
};
struct ReplacementIRBuilder
    : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {
  MDNode *MMRAMD = nullptr;

  // Preserve metadata that stays valid on the replacement instructions, and
  // keep FP strictness inside strictfp functions.
  explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
      : IRBuilder(I->getContext(), InstSimplifyFolder(DL),
                  IRBuilderCallbackInserter(
                      [this](Instruction *I) { addMMRAMD(I); })) {
    SetInsertPoint(I);
    this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
    if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
      this->setIsFPConstrained(true);

    MMRAMD = I->getMetadata(LLVMContext::MD_mmra);
  }

  void addMMRAMD(Instruction *I) {
    if (canInstructionHaveMMRAs(*I))
      I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
  }
};
char AtomicExpandLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
                      "Expand Atomic instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(LibcallLoweringInfoWrapper)
INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
                    "Expand Atomic instructions", false, false)
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}
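// Illustrative example: for `store atomic double %v, ptr %p seq_cst, align 8`
// the helper above returns getTypeStoreSize(double) == 8, the byte count the
// rest of the pass compares against the alignment and the target's maximum
// supported atomic width.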
// Copy metadata that's safe to preserve when widening atomics.
static void copyMetadataForAtomic(Instruction &Dest,
                                  const Instruction &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MD);
  LLVMContext &Ctx = Dest.getContext();

  for (auto [ID, N] : MD) {
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_noalias_addrspace:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_mmra:
      Dest.setMetadata(ID, N);
      break;
    default:
      if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory"))
        Dest.setMetadata(ID, N);
      else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
        Dest.setMetadata(ID, N);
      break;
    }
  }
}
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  unsigned MaxSize = TLI->getMaxAtomicSizeInBitsSupported() / 8;
  return Alignment >= Size && Size <= MaxSize;
}
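// Worked example (illustrative): on a target whose
// getMaxAtomicSizeInBitsSupported() is 64, MaxSize is 8, so a naturally
// aligned i32 atomic (Size 4, Alignment 4) is supported, while an i64 atomic
// at align 4 fails the Alignment >= Size check and is expanded to a libcall.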
template <typename Inst>
static void writeUnsupportedAtomicSizeReason(const TargetLowering *TLI,
                                             Inst *I, raw_ostream &OS) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  unsigned MaxSize = TLI->getMaxAtomicSizeInBitsSupported() / 8;
  bool NeedSeparator = false;

  if (Alignment < Size) {
    OS << "instruction alignment " << Alignment.value()
       << " is smaller than the required " << Size
       << "-byte alignment for this atomic operation";
    NeedSeparator = true;
  }

  if (Size > MaxSize) {
    if (NeedSeparator)
      OS << "; ";
    OS << "target supports atomics up to " << MaxSize
       << " bytes, but this atomic accesses " << Size << " bytes";
  }
}
template <typename Inst>
void AtomicExpandImpl::handleUnsupportedAtomicSize(
    Inst *I, const Twine &AtomicOpName, Instruction *DiagnosticInst) {
  SmallString<128> FailureReason;
  raw_svector_ostream OS(FailureReason);
  writeUnsupportedAtomicSizeReason(TLI, I, OS);
  handleFailure(*I, Twine("unsupported ") + AtomicOpName + ": " + FailureReason,
                DiagnosticInst);
}
bool AtomicExpandImpl::tryInsertTrailingSeqCstFence(Instruction *AtomicI) {
  if (!TLI->shouldInsertTrailingSeqCstFenceForAtomicStore(AtomicI))
    return false;

  ReplacementIRBuilder Builder(AtomicI, *DL);
  if (Instruction *TrailingFence = TLI->emitTrailingFence(
          Builder, AtomicI, AtomicOrdering::SequentiallyConsistent)) {
    TrailingFence->moveAfter(AtomicI);
    return true;
  }
  return false;
}
template <typename AtomicInst>
bool AtomicExpandImpl::tryInsertFencesForAtomic(AtomicInst *AtomicI,
                                                bool OrderingRequiresFence,
                                                bool ShouldInsertFences) {
  if (OrderingRequiresFence && ShouldInsertFences) {
    AtomicOrdering FenceOrdering = AtomicI->getOrdering();
    AtomicOrdering NewOrdering =
        TLI->atomicOperationOrderAfterFenceSplit(AtomicI);
    AtomicI->setOrdering(NewOrdering);
    return bracketInstWithFences(AtomicI, FenceOrdering);
  }
  if (!ShouldInsertFences)
    return tryInsertTrailingSeqCstFence(AtomicI);
  return false;
}
bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!atomicSizeSupported(TLI, LI)) {
      expandAtomicLoadToLibcall(LI);
      return true;
    }
    bool MadeChange = false;
    if (TLI->shouldCastAtomicLoadInIR(LI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      LI = convertAtomicLoadToIntegerType(LI);
      MadeChange = true;
    }
    MadeChange |= tryInsertFencesForAtomic(
        LI, isAcquireOrStronger(LI->getOrdering()),
        TLI->shouldInsertFencesForAtomic(LI));
    MadeChange |= tryExpandAtomicLoad(LI);
    return MadeChange;
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!atomicSizeSupported(TLI, SI)) {
      expandAtomicStoreToLibcall(SI);
      return true;
    }
    bool MadeChange = false;
    if (TLI->shouldCastAtomicStoreInIR(SI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      SI = convertAtomicStoreToIntegerType(SI);
      MadeChange = true;
    }
    MadeChange |= tryInsertFencesForAtomic(
        SI, isReleaseOrStronger(SI->getOrdering()),
        TLI->shouldInsertFencesForAtomic(SI));
    MadeChange |= tryExpandAtomicStore(SI);
    return MadeChange;
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!atomicSizeSupported(TLI, RMWI)) {
      expandAtomicRMWToLibcall(RMWI);
      return true;
    }
    bool MadeChange = false;
    if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      RMWI = convertAtomicXchgToIntegerType(RMWI);
      MadeChange = true;
    }
    MadeChange |= tryInsertFencesForAtomic(
        RMWI,
        isAcquireOrStronger(RMWI->getOrdering()) ||
            isReleaseOrStronger(RMWI->getOrdering()),
        TLI->shouldInsertFencesForAtomic(RMWI));
    MadeChange |= (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
                  tryExpandAtomicRMW(RMWI);
    return MadeChange;
  }

  if (auto *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!atomicSizeSupported(TLI, CASI)) {
      expandAtomicCASToLibcall(CASI);
      return true;
    }
    bool MadeChange = false;
    if (CASI->getCompareOperand()->getType()->isPointerTy()) {
      // TODO: add a TLI hook so targets can lower pointer-typed cmpxchg
      // directly instead of going through integers.
      CASI = convertCmpXchgToIntegerType(CASI);
      MadeChange = true;
    }

    auto CmpXchgExpansion = TLI->shouldExpandAtomicCmpXchgInIR(CASI);
    if (CmpXchgExpansion == TargetLoweringBase::AtomicExpansionKind::None &&
        TLI->shouldInsertFencesForAtomic(CASI)) {
      AtomicOrdering FenceOrdering = CASI->getMergedOrdering();
      AtomicOrdering CASOrdering =
          TLI->atomicOperationOrderAfterFenceSplit(CASI);

      CASI->setSuccessOrdering(CASOrdering);
      CASI->setFailureOrdering(CASOrdering);
      MadeChange |= bracketInstWithFences(CASI, FenceOrdering);
    } else if (CmpXchgExpansion !=
               TargetLoweringBase::AtomicExpansionKind::LLSC) {
      MadeChange |= tryInsertTrailingSeqCstFence(CASI);
    }

    MadeChange |= tryExpandAtomicCmpXchg(CASI);
    return MadeChange;
  }

  return false;
}
bool AtomicExpandImpl::run(
    Function &F, const LibcallLoweringModuleAnalysisResult &LibcallResult,
    const TargetMachine *TM) {
  const auto *Subtarget = TM->getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;

  TLI = Subtarget->getTargetLowering();
  LibcallLowering = &LibcallResult.getLibcallLowering(*Subtarget);
  DL = &F.getDataLayout();

  bool MadeChange = false;
  // Expansions may split blocks and introduce new atomic instructions (which
  // are processed in turn), so the iteration re-syncs after each change.
  // ...
      if (processAtomicInstr(&Inst)) {
        MadeChange = true;
        // ...
      }
  // ...
  return MadeChange;
}
bool AtomicExpandLegacy::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;
  auto *TM = &TPC->getTM<TargetMachine>();

  const LibcallLoweringModuleAnalysisResult &LibcallResult =
      getAnalysis<LibcallLoweringInfoWrapper>().getResult(*F.getParent());

  AtomicExpandImpl AE;
  return AE.run(F, LibcallResult, TM);
}

FunctionPass *llvm::createAtomicExpandLegacyPass() {
  return new AtomicExpandLegacy();
}
PreservedAnalyses AtomicExpandPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
  // ... (fetch the TargetMachine and the module-level libcall lowering result)
  if (!LibcallResult) {
    F.getContext().emitError(/* ... */ "' analysis required");
    return PreservedAnalyses::all();
  }

  AtomicExpandImpl AE;
  bool Changed = AE.run(F, *LibcallResult, TM);
  if (!Changed)
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}
bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                             AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  Value *Addr = LI->getPointerOperand();

  auto *NewLI = Builder.CreateLoad(NewTy, Addr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = LI->getType()->isPointerTy()
                      ? Builder.CreateIntToPtr(NewLI, LI->getType())
                      : Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
AtomicRMWInst *
AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI = Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, Addr, NewVal,
                                          RMWI->getAlign(), RMWI->getOrdering(),
                                          RMWI->getSyncScopeID());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}
bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilderBase &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    LI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicLoad(LI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}
bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicStore(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStoreToXChg(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    SI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicStore");
  }
}
bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads.
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();
  return true;
}
bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();
  return true;
}
StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();

  StoreInst *NewSI = Builder.CreateStore(NewVal, Addr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
  // Emit an atomicrmw xchg whose result is dead; it observes the same memory
  // order as the store it replaces.
  ReplacementIRBuilder Builder(SI, *DL);
  AtomicOrdering Ordering = SI->getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic);
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}
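// Illustrative before/after for the store expansion above:
//   store atomic i32 %v, ptr %p seq_cst, align 4
// becomes
//   %old = atomicrmw xchg ptr %p, i32 %v seq_cst, align 4
// with %old left unused; tryExpandAtomicRMW then lowers the xchg further if
// the target requests it.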
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 bool IsVolatile, Value *&Success,
                                 Value *&NewLoaded, Instruction *MetadataSrc) {
  Type *OrigTy = NewVal->getType();

  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Pair->setVolatile(IsVolatile);
  if (MetadataSrc)
    copyMetadataForAtomic(*Pair, *MetadataSrc);

  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(AI);
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
      return true;
    }

    SmallVector<StringRef> SSNs;
    Ctx.getSyncScopeNames(SSNs);
    StringRef MemScope = SSNs[AI->getSyncScopeID()].empty()
                             ? "system"
                             : SSNs[AI->getSyncScopeID()];
    OptimizationRemarkEmitter ORE(AI->getFunction());
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
             << "A compare and swap loop was generated for an atomic "
             << AtomicRMWInst::getOperationName(AI->getOperation())
             << " operation at " << MemScope << " memory scope";
    });
    expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    if (ValueSize < MinCASSize) {
      AtomicRMWInst::BinOp Op = AI->getOperation();
      // Widen And/Or/Xor and give the target another chance at expanding it.
      if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) {
        tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
        return true;
      }
    }
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic: {
    TLI->emitCmpArithAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicRMWInst(AI);
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicRMW(AI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}
struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy() || PMV.ValueType->isVectorTy())
    PMV.IntValueType =
        Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  PointerType *PtrTy = cast<PointerType>(Addr->getType());
  IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());
  Value *PtrLSB;

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))},
        nullptr, "AlignedAddr");

    Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    // If the alignment is high enough, the low bits are known zero.
    PMV.AlignedAddr = Addr;
    PtrLSB = Constant::getNullValue(IntTy);
  }

  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");

  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}
static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType, "extracted");
  return Builder.CreateBitCast(Trunc, PMV.ValueType);
}
static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated,
                                const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Updated = Builder.CreateBitCast(Updated, PMV.IntValueType);

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  // ... (min/max and FP ops extract the value, operate, and re-insert it)
  }
}
void AtomicExpandImpl::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  // Widen And/Or/Xor and give the target another chance at expanding it.
  AtomicRMWInst::BinOp Op = AI->getOperation();
  if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
      Op == AtomicRMWInst::And) {
    tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
    return;
  }
  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
  if (Op == AtomicRMWInst::Xchg || Op == AtomicRMWInst::Add ||
      Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Nand) {
    Value *ValOp = Builder.CreateBitCast(AI->getValOperand(), PMV.IntValueType);
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
                          "ValOperand_Shifted");
  }

  auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
    return performMaskedAtomicOp(Op, Builder, Loaded, ValOperand_Shifted,
                                 AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder, SSID,
                                     AI->isVolatile(), PerformPartwordOp,
                                     createCmpXchgInstFun, AI);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;
  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(ValOperand_Shifted, PMV.Inv_Mask, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
      Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
      AI->getOrdering(), AI->getSyncScopeID());
  copyMetadataForAtomic(*NewAI, *AI);

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
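// Illustrative: widening `atomicrmw and ptr %p, i8 %v` to i32 ORs the shifted
// operand with Inv_Mask, so the bytes outside the i8 lane are AND-ed with
// all-ones and therefore left unchanged by the wide operation.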
bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // Expand a sub-word cmpxchg into a word-sized cmpxchg loop: mask the
  // expected and new values into the containing word, and retry as long as
  // only the bits *outside* the interesting lane keep changing.
  // ... (full loop diagram elided in this listing)
  ReplacementIRBuilder Builder(CI, *DL);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(),
                       CI->getPointerOperand(), CI->getAlign(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask out the lane being compared.
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // ...
  processAtomicInstr(InitLoaded);
  // ...

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  // ... (branch on success; FailureBB recomputes the masked-out bits as
  // OldVal_MaskOut and retries when only those bits changed)
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);
  // ... (rebuild the { iN, i1 } result in EndBB and erase CI)
  return true;
}
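// Shape of the emitted control flow (illustrative, i8 lane in an i32 word):
//   entry:                    load the word, mask out the lane, br loop
//   partword.cmpxchg.loop:    OR in expected/new bytes, word-wide cmpxchg,
//                             success -> end, otherwise -> failure
//   partword.cmpxchg.failure: if only bits outside the lane changed, retry
//                             the loop, else fall through as a failed CAS
//   partword.cmpxchg.end:     extract the lane value and the success bit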
void AtomicExpandImpl::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Sign-extend for signed min/max so the target compares the right lane.
  Instruction::CastOps CastOp = Instruction::ZExt;
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
    AtomicCmpXchgInst *CI) {
  ReplacementIRBuilder Builder(CI, *DL);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}
Value *AtomicExpandImpl::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >= F->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place): replace it with a branch into the loop.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}
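// Emitted loop shape (illustrative):
//   atomicrmw.start:
//     %loaded = @load.linked(%addr)
//     %new = <op> %loaded, %incr
//     %stored = @store.conditional(%new, %addr)
//     %try_again = icmp ne i32 %stored, 0
//     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end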
AtomicCmpXchgInst *
AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);
  auto *NewCI = Builder.CreateAtomicCmpXchg(
      CI->getPointerOperand(), NewCmp, NewNewVal, CI->getAlign(),
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
  // ... (extract and int-to-ptr the old value, rebuild the { iN, i1 } result,
  // replace all uses of CI, and erase it)
  return NewCI;
}
bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence take care of
  // everything. Otherwise those hooks are no-ops and we preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going
  // to be attempted.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // we always do it unless the function is optimized for minimum size.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // ... (block comment with the full control-flow diagram elided)
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB,
                       MDBuilder(F->getContext()).createLikelyBranchWeights());

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB,
                       MDBuilder(F->getContext()).createLikelyBranchWeights());

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");
    Builder.CreateCondBr(
        ShouldStore, TryStoreBB, NoStoreBB,
        MDBuilder(F->getContext()).createLikelyBranchWeights());
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else {
    Builder.CreateUnreachable();
  }

  // Make sure later instructions don't get reordered with a fence if needed.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingSeqCstFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked (e.g., on ARM, clearing
  // the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow-based knowledge of whether the cmpxchg
  // succeeded, which we expose to later passes through PHIs.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" of the expansion; it may be wider than the type
  // in the original cmpxchg instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Replace extractvalue uses of the cmpxchg result with the PHI-derived
  // values, and collect the now-dead extracts.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto *User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;
    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");
    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);
    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some other use of the full struct return remains, so reconstruct it.
    Value *Res = PoisonValue::get(CI->getType());
    Res = Builder.CreateInsertValue(Res, Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);
    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto *C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  switch (RMWI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  case AtomicRMWInst::Min:
    return C->isMaxValue(true);
  case AtomicRMWInst::Max:
    return C->isMinValue(true);
  case AtomicRMWInst::UMin:
    return C->isMaxValue(false);
  case AtomicRMWInst::UMax:
    return C->isMinValue(false);
  default:
    return false;
  }
}
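// Examples of idempotent operations recognized above (illustrative):
//   atomicrmw or   ptr %p, i32 0    ; old | 0    == old
//   atomicrmw and  ptr %p, i32 -1   ; old & ~0   == old
//   atomicrmw umax ptr %p, i32 0    ; max(old,0) == old (unsigned)
// Each can be rewritten as a suitably fenced atomic load of %p.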
bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto *ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID, bool IsVolatile,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place): replace it with a branch into the loop.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  if (TLI->shouldIssueAtomicLoadForAtomicEmulationLoop()) {
    // Use an atomic initial load so the emulation loop does not introduce a
    // data race on the seed value.
    InitLoaded->setAtomic(AtomicOrdering::Monotonic, SSID);
    processAtomicInstr(InitLoaded);
  }

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, IsVolatile, Success, NewLoaded, MetadataSrc);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}
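// Emitted loop shape (illustrative, for `atomicrmw add ptr %p, i32 %v`):
//   entry:
//     %init = load i32, ptr %p
//     br label %atomicrmw.start
//   atomicrmw.start:
//     %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
//     %new = add i32 %loaded, %v
//     %pair = cmpxchg ptr %p, i32 %loaded, i32 %new <order> <failure order>
//     %success = extractvalue { i32, i1 } %pair, 1
//     %newloaded = extractvalue { i32, i1 } %pair, 0
//     br i1 %success, label %atomicrmw.end, label %atomicrmw.start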
bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicCmpXchgInst(CI);
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
    TLI->emitExpandAtomicCmpXchg(CI);
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  }
}
bool AtomicExpandImpl::expandAtomicRMWToCmpXchg(
    AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg) {
  ReplacementIRBuilder Builder(AI, *DL);
  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(), AI->isVolatile(),
      [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg, /*MetadataSrc=*/AI);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}

// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient and the size must be
// one of the potentially-specialized sizes that actually exists in C on the
// target.
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // "LargestSize" approximates the largest integer type expressible in the
  // target's C ABI: int128 on 64-bit platforms, otherwise 64-bit integers.
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
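// Illustrative: on a typical 64-bit target LargestSize is 16, so an aligned
// 4-byte atomicrmw add can call __atomic_fetch_add_4, while the same
// operation at align 2 fails the check and is lowered through a cmpxchg loop
// whose CAS becomes the generic __atomic_compare_exchange call.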
void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool Expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!Expanded)
    handleUnsupportedAtomicSize(I, "atomic load");
}
void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool Expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!Expanded)
    handleUnsupportedAtomicSize(I, "atomic store");
}
void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I,
                                                const Twine &AtomicOpName,
                                                Instruction *DiagnosticInst) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool Expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!Expanded)
    handleUnsupportedAtomicSize(I, AtomicOpName, DiagnosticInst);
}
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::Xchg:
    return ArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return ArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return ArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return ArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return ArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return ArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return ArrayRef(LibcallsNand);
  default:
    // No atomic libcalls are available for the remaining operations.
    return {};
  }
}
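// Each table above is indexed by size class: index 0 is the generic
// (unsized) libcall, or UNKNOWN_LIBCALL when none exists, and indices 1-5
// select the 1, 2, 4, 8, and 16 byte specializations, e.g. a 4-byte add maps
// to ATOMIC_FETCH_ADD_4 via Libcalls[3].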
void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for the
  // operation (min/max), or only size-specialized libcalls exist and none
  // matched. Fall back to a cmpxchg loop whose compare-exchange is itself
  // expanded to a libcall.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this, I](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                     Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                     SyncScope::ID SSID, bool IsVolatile, Value *&Success,
                     Value *&NewLoaded, Instruction *MetadataSrc) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder),
              SSID);
          Pair->setVolatile(IsVolatile);
          if (MetadataSrc)
            copyMetadataForAtomic(*Pair, *MetadataSrc);
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then lower the CAS itself to a libcall.
          expandAtomicCASToLibcall(Pair, "atomicrmw", /*DiagnosticInst=*/I);
        });
  }
}
// Expand an atomic operation into a call to one of the __atomic_* libcalls,
// passing operands and results through temporary allocas where the generic
// (pointer-based) form is required.
bool AtomicExpandImpl::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  // ... (a special case guarded by M->getTargetTriple().isOSWindows() &&
  // M->getTargetTriple().isX86_64() is elided in this listing)

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32, so getInt32Ty may be
  // wrong on architectures with a narrower "int".
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this operation,
    // so give up.
    return false;
  }

  RTLIB::LibcallImpl LibcallImpl = LibcallLowering->getLibcallImpl(RTLibType);
  if (LibcallImpl == RTLIB::Unsupported) {
    // This target does not implement the requested atomic libcall, so give up.
    return false;
  }

  // Build up the function call. There are two kinds: the sized variants such
  //   iN __atomic_load_N(iN *ptr, int ordering)
  // and the generic, memory-based variants such as
  //   void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  // The exact signature depends on UseSizedLibcall, CASExpected,
  // ValueOperand, and HasResult.
  AllocaInst *AllocaCASExpected = nullptr;
  AllocaInst *AllocaValue = nullptr;
  AllocaInst *AllocaResult = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equal to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  Value *PtrVal = PointerOperand;
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaCASExpected);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected);
  }

  // 'val' argument ('desired' for cmpxchg), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue = Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      Builder.CreateLifetimeStart(AllocaValue);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaResult);
    Args.push_back(AllocaResult);
  }

  // 'ordering' ('success_order' for cmpxchg) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn = M->getOrInsertFunction(
      RTLIB::RuntimeLibcallsInfo::getLibcallImplName(LibcallImpl), FnType,
      Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue);

  if (CASExpected) {
    // The final result from the CAS is { load of the 'expected' alloca, bool
    // result from the call }.
    Type *FinalResultTy = I->getType();
    Value *V = PoisonValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall) {
      // Add casts from Result's scalar type to I's <n x ptr> vector type.
      auto *PtrTy = dyn_cast<PointerType>(I->getType()->getScalarType());
      auto *VTy = dyn_cast<VectorType>(I->getType());
      if (VTy && PtrTy && !Result->getType()->isVectorTy()) {
        unsigned AS = PtrTy->getAddressSpace();
        Value *BC = Builder.CreateBitCast(
            Result, VTy->getWithNewType(DL.getIntPtrType(Ctx, AS)));
        V = Builder.CreateIntToPtr(BC, I->getType());
      } else {
        V = Builder.CreateBitOrPointerCast(Result, I->getType());
      }
    } else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}
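// End-to-end sketch (illustrative): an i128 cmpxchg lowered through the
// generic libcall becomes roughly
//   %expected.addr = alloca i128         ; seeded with the expected value
//   %desired.addr  = alloca i128         ; seeded with the new value
//   %ok = call zeroext i1 @__atomic_compare_exchange(i64 16, ptr %p,
//             ptr %expected.addr, ptr %desired.addr, i32 5, i32 5)
//   %old = load i128, ptr %expected.addr ; the value observed on failure
// and the original { i128, i1 } result is rebuilt from %old and %ok.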