60#define DEBUG_TYPE "atomic-expand"
64class AtomicExpandImpl {
83 Ctx.
emitError(DiagnosticInst ? DiagnosticInst : &FailedInst, Msg);
85 if (!FailedInst.getType()->isVoidTy())
87 FailedInst.eraseFromParent();
90 template <
typename Inst>
91 void handleUnsupportedAtomicSize(Inst *
I,
const Twine &AtomicOpName,
95 bool tryInsertTrailingSeqCstFence(
Instruction *AtomicI);
96 template <
typename AtomicInst>
97 bool tryInsertFencesForAtomic(AtomicInst *AtomicI,
bool OrderingRequiresFence,
101 bool tryExpandAtomicLoad(
LoadInst *LI);
102 bool expandAtomicLoadToLL(
LoadInst *LI);
103 bool expandAtomicLoadToCmpXchg(
LoadInst *LI);
113 void expandAtomicOpToLLSC(
117 void expandPartwordAtomicRMW(
125 static Value *insertRMWCmpXchgLoop(
129 CreateCmpXchgInstFun CreateCmpXchg,
Instruction *MetadataSrc);
141 void expandAtomicLoadToLibcall(
LoadInst *LI);
142 void expandAtomicStoreToLibcall(
StoreInst *LI);
145 const Twine &AtomicOpName =
"cmpxchg",
149 CreateCmpXchgInstFun CreateCmpXchg);
174struct ReplacementIRBuilder
175 :
IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {
185 this->CollectMetadataToCopy(
I, {LLVMContext::MD_pcsections});
186 if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
187 this->setIsFPConstrained(
true);
189 MMRAMD =
I->getMetadata(LLVMContext::MD_mmra);
194 I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
200char AtomicExpandLegacy::ID = 0;
205 "Expand Atomic instructions",
false,
false)
214 return DL.getTypeStoreSize(LI->getType());
219 return DL.getTypeStoreSize(
SI->getValueOperand()->getType());
236 Source.getAllMetadata(MD);
240 for (
auto [
ID,
N] : MD) {
242 case LLVMContext::MD_dbg:
243 case LLVMContext::MD_tbaa:
244 case LLVMContext::MD_tbaa_struct:
245 case LLVMContext::MD_alias_scope:
246 case LLVMContext::MD_noalias:
247 case LLVMContext::MD_noalias_addrspace:
248 case LLVMContext::MD_access_group:
249 case LLVMContext::MD_mmra:
253 if (
ID == Ctx.getMDKindID(
"amdgpu.no.remote.memory"))
255 else if (
ID == Ctx.getMDKindID(
"amdgpu.no.fine.grained.memory"))
265template <
typename Inst>
268 Align Alignment =
I->getAlign();
270 return Alignment >=
Size &&
Size <= MaxSize;
273template <
typename Inst>
277 Align Alignment =
I->getAlign();
278 bool NeedSeparator =
false;
280 if (Alignment <
Size) {
281 OS <<
"instruction alignment " << Alignment.
value()
282 <<
" is smaller than the required " <<
Size
283 <<
"-byte alignment for this atomic operation";
284 NeedSeparator =
true;
288 if (
Size > MaxSize) {
291 OS <<
"target supports atomics up to " << MaxSize
292 <<
" bytes, but this atomic accesses " <<
Size <<
" bytes";
296template <
typename Inst>
297void AtomicExpandImpl::handleUnsupportedAtomicSize(
300 SmallString<128> FailureReason;
301 raw_svector_ostream OS(FailureReason);
303 handleFailure(*
I, Twine(
"unsupported ") + AtomicOpName +
": " + FailureReason,
307bool AtomicExpandImpl::tryInsertTrailingSeqCstFence(Instruction *AtomicI) {
313 Builder, AtomicI, AtomicOrdering::SequentiallyConsistent)) {
314 TrailingFence->moveAfter(AtomicI);
320template <
typename AtomicInst>
321bool AtomicExpandImpl::tryInsertFencesForAtomic(AtomicInst *AtomicI,
322 bool OrderingRequiresFence,
325 if (OrderingRequiresFence && ShouldInsertFences) {
327 AtomicI->setOrdering(NewOrdering);
328 return bracketInstWithFences(AtomicI, FenceOrdering);
330 if (!ShouldInsertFences)
331 return tryInsertTrailingSeqCstFence(AtomicI);
335bool AtomicExpandImpl::processAtomicInstr(Instruction *
I) {
341 expandAtomicLoadToLibcall(LI);
345 bool MadeChange =
false;
347 TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
348 LI = convertAtomicLoadToIntegerType(LI);
352 MadeChange |= tryInsertFencesForAtomic(
355 MadeChange |= tryExpandAtomicLoad(LI);
364 expandAtomicStoreToLibcall(SI);
368 bool MadeChange =
false;
370 TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
371 SI = convertAtomicStoreToIntegerType(SI);
375 MadeChange |= tryInsertFencesForAtomic(
378 MadeChange |= tryExpandAtomicStore(SI);
384 expandAtomicRMWToLibcall(RMWI);
388 bool MadeChange =
false;
390 TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
391 RMWI = convertAtomicXchgToIntegerType(RMWI);
395 MadeChange |= tryInsertFencesForAtomic(
405 MadeChange |= (
isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
406 tryExpandAtomicRMW(RMWI);
412 expandAtomicCASToLibcall(CASI);
418 bool MadeChange =
false;
419 if (CASI->getCompareOperand()->getType()->isPointerTy()) {
422 CASI = convertCmpXchgToIntegerType(CASI);
428 if (CmpXchgExpansion == TargetLoweringBase::AtomicExpansionKind::None &&
439 CASI->setSuccessOrdering(CASOrdering);
440 CASI->setFailureOrdering(CASOrdering);
441 MadeChange |= bracketInstWithFences(CASI, FenceOrdering);
443 }
else if (CmpXchgExpansion !=
444 TargetLoweringBase::AtomicExpansionKind::LLSC) {
446 MadeChange |= tryInsertTrailingSeqCstFence(CASI);
449 MadeChange |= tryExpandAtomicCmpXchg(CASI);
456bool AtomicExpandImpl::run(
457 Function &
F,
const LibcallLoweringModuleAnalysisResult &LibcallResult,
458 const TargetMachine *TM) {
460 if (!Subtarget->enableAtomicExpand())
462 TLI = Subtarget->getTargetLowering();
464 DL = &
F.getDataLayout();
466 bool MadeChange =
false;
478 if (processAtomicInstr(&Inst)) {
490bool AtomicExpandLegacy::runOnFunction(Function &
F) {
492 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
495 auto *TM = &TPC->getTM<TargetMachine>();
497 const LibcallLoweringModuleAnalysisResult &LibcallResult =
498 getAnalysis<LibcallLoweringInfoWrapper>().getResult(*
F.getParent());
500 return AE.run(
F, LibcallResult, TM);
504 return new AtomicExpandLegacy();
514 if (!LibcallResult) {
516 "' analysis required");
522 bool Changed = AE.run(
F, *LibcallResult, TM);
529bool AtomicExpandImpl::bracketInstWithFences(
Instruction *
I,
531 ReplacementIRBuilder Builder(
I, *
DL);
541 return (LeadingFence || TrailingFence);
556LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
558 Type *NewTy = getCorrespondingIntegerType(LI->
getType(),
M->getDataLayout());
560 ReplacementIRBuilder Builder(LI, *
DL);
564 auto *NewLI = Builder.CreateLoad(NewTy, Addr);
565 NewLI->setAlignment(LI->
getAlign());
568 LLVM_DEBUG(
dbgs() <<
"Replaced " << *LI <<
" with " << *NewLI <<
"\n");
570 Value *NewVal = Builder.CreateBitCast(NewLI, LI->
getType());
577AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
582 getCorrespondingIntegerType(RMWI->
getType(),
M->getDataLayout());
584 ReplacementIRBuilder Builder(RMWI, *
DL);
589 ? Builder.CreatePtrToInt(Val, NewTy)
590 : Builder.CreateBitCast(Val, NewTy);
597 LLVM_DEBUG(
dbgs() <<
"Replaced " << *RMWI <<
" with " << *NewRMWI <<
"\n");
600 ? Builder.CreateIntToPtr(NewRMWI, RMWI->
getType())
601 : Builder.CreateBitCast(NewRMWI, RMWI->
getType());
607bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
609 case TargetLoweringBase::AtomicExpansionKind::None:
611 case TargetLoweringBase::AtomicExpansionKind::LLSC:
612 expandAtomicOpToLLSC(
615 [](IRBuilderBase &Builder,
Value *Loaded) { return Loaded; });
617 case TargetLoweringBase::AtomicExpansionKind::LLOnly:
618 return expandAtomicLoadToLL(LI);
619 case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
620 return expandAtomicLoadToCmpXchg(LI);
621 case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
622 LI->
setAtomic(AtomicOrdering::NotAtomic);
624 case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
632bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
634 case TargetLoweringBase::AtomicExpansionKind::None:
636 case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
639 case TargetLoweringBase::AtomicExpansionKind::Expand:
640 expandAtomicStoreToXChg(SI);
642 case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
643 SI->setAtomic(AtomicOrdering::NotAtomic);
650bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
651 ReplacementIRBuilder Builder(LI, *
DL);
666bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
667 ReplacementIRBuilder Builder(LI, *
DL);
669 if (Order == AtomicOrdering::Unordered)
670 Order = AtomicOrdering::Monotonic;
676 Value *Pair = Builder.CreateAtomicCmpXchg(
677 Addr, DummyVal, DummyVal, LI->
getAlign(), Order,
679 Value *
Loaded = Builder.CreateExtractValue(Pair, 0,
"loaded");
695StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
696 ReplacementIRBuilder Builder(SI, *
DL);
697 auto *
M =
SI->getModule();
698 Type *NewTy = getCorrespondingIntegerType(
SI->getValueOperand()->getType(),
700 Value *NewVal = Builder.CreateBitCast(
SI->getValueOperand(), NewTy);
702 Value *Addr =
SI->getPointerOperand();
704 StoreInst *NewSI = Builder.CreateStore(NewVal, Addr);
708 LLVM_DEBUG(
dbgs() <<
"Replaced " << *SI <<
" with " << *NewSI <<
"\n");
709 SI->eraseFromParent();
713void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
720 ReplacementIRBuilder Builder(SI, *
DL);
722 assert(Ordering != AtomicOrdering::NotAtomic);
724 ? AtomicOrdering::Monotonic
726 AtomicRMWInst *AI = Builder.CreateAtomicRMW(
728 SI->getAlign(), RMWOrdering);
729 SI->eraseFromParent();
732 tryExpandAtomicRMW(AI);
747 NewVal = Builder.CreateBitCast(NewVal, IntTy);
748 Loaded = Builder.CreateBitCast(Loaded, IntTy);
752 Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
757 Success = Builder.CreateExtractValue(Pair, 1,
"success");
758 NewLoaded = Builder.CreateExtractValue(Pair, 0,
"newloaded");
761 NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
764bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
768 case TargetLoweringBase::AtomicExpansionKind::None:
770 case TargetLoweringBase::AtomicExpansionKind::LLSC: {
773 if (ValueSize < MinCASSize) {
774 expandPartwordAtomicRMW(AI,
775 TargetLoweringBase::AtomicExpansionKind::LLSC);
777 auto PerformOp = [&](IRBuilderBase &Builder,
Value *
Loaded) {
786 case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
789 if (ValueSize < MinCASSize) {
790 expandPartwordAtomicRMW(AI,
791 TargetLoweringBase::AtomicExpansionKind::CmpXChg);
800 return OptimizationRemark(
DEBUG_TYPE,
"Passed", AI)
801 <<
"A compare and swap loop was generated for an atomic "
803 << MemScope <<
" memory scope";
809 case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
812 if (ValueSize < MinCASSize) {
817 tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
821 expandAtomicRMWToMaskedIntrinsic(AI);
824 case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
828 case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic: {
832 case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
834 case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
844struct PartwordMaskValues {
846 Type *WordType =
nullptr;
848 Type *IntValueType =
nullptr;
849 Value *AlignedAddr =
nullptr;
850 Align AlignedAddrAlignment;
852 Value *ShiftAmt =
nullptr;
853 Value *Mask =
nullptr;
854 Value *Inv_Mask =
nullptr;
858raw_ostream &
operator<<(raw_ostream &O,
const PartwordMaskValues &PMV) {
859 auto PrintObj = [&
O](
auto *
V) {
866 O <<
"PartwordMaskValues {\n";
868 PrintObj(PMV.WordType);
870 PrintObj(PMV.ValueType);
871 O <<
" AlignedAddr: ";
872 PrintObj(PMV.AlignedAddr);
873 O <<
" AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.
value() <<
'\n';
875 PrintObj(PMV.ShiftAmt);
879 PrintObj(PMV.Inv_Mask);
905 unsigned MinWordSize) {
906 PartwordMaskValues PMV;
911 unsigned ValueSize =
DL.getTypeStoreSize(
ValueType);
913 PMV.ValueType = PMV.IntValueType =
ValueType;
918 PMV.WordType = MinWordSize > ValueSize ?
Type::getIntNTy(Ctx, MinWordSize * 8)
920 if (PMV.ValueType == PMV.WordType) {
921 PMV.AlignedAddr = Addr;
922 PMV.AlignedAddrAlignment = AddrAlign;
923 PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
924 PMV.Mask = ConstantInt::get(PMV.ValueType, ~0,
true);
928 PMV.AlignedAddrAlignment =
Align(MinWordSize);
930 assert(ValueSize < MinWordSize);
933 IntegerType *IntTy =
DL.getIndexType(Ctx, PtrTy->getAddressSpace());
936 if (AddrAlign < MinWordSize) {
937 PMV.AlignedAddr = Builder.CreateIntrinsic(
938 Intrinsic::ptrmask, {PtrTy, IntTy},
940 nullptr,
"AlignedAddr");
942 Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
943 PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1,
"PtrLSB");
946 PMV.AlignedAddr = Addr;
950 if (
DL.isLittleEndian()) {
952 PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
955 PMV.ShiftAmt = Builder.CreateShl(
956 Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
959 PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType,
"ShiftAmt");
960 PMV.Mask = Builder.CreateShl(
961 ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
964 PMV.Inv_Mask = Builder.CreateNot(PMV.Mask,
"Inv_Mask");
970 const PartwordMaskValues &PMV) {
971 assert(WideWord->
getType() == PMV.WordType &&
"Widened type mismatch");
972 if (PMV.WordType == PMV.ValueType)
975 Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt,
"shifted");
976 Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType,
"extracted");
977 return Builder.CreateBitCast(Trunc, PMV.ValueType);
981 Value *Updated,
const PartwordMaskValues &PMV) {
982 assert(WideWord->
getType() == PMV.WordType &&
"Widened type mismatch");
983 assert(Updated->
getType() == PMV.ValueType &&
"Value type mismatch");
984 if (PMV.WordType == PMV.ValueType)
987 Updated = Builder.CreateBitCast(Updated, PMV.IntValueType);
989 Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType,
"extended");
991 Builder.CreateShl(ZExt, PMV.ShiftAmt,
"shifted",
true);
992 Value *
And = Builder.CreateAnd(WideWord, PMV.Inv_Mask,
"unmasked");
993 Value *
Or = Builder.CreateOr(
And, Shift,
"inserted");
1003 const PartwordMaskValues &PMV) {
1009 Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
1010 Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
1022 Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
1023 Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
1024 Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
1063void AtomicExpandImpl::expandPartwordAtomicRMW(
1069 tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
1075 ReplacementIRBuilder Builder(AI, *
DL);
1077 PartwordMaskValues PMV =
1081 Value *ValOperand_Shifted =
nullptr;
1085 ValOperand_Shifted =
1086 Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
1087 "ValOperand_Shifted");
1090 auto PerformPartwordOp = [&](IRBuilderBase &Builder,
Value *
Loaded) {
1096 if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
1097 OldResult = insertRMWCmpXchgLoop(
1098 Builder, PMV.WordType, PMV.AlignedAddr, PMV.AlignedAddrAlignment,
1101 assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
1102 OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
1103 PMV.AlignedAddrAlignment, MemOpOrder,
1113AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
1114 ReplacementIRBuilder Builder(AI, *
DL);
1119 "Unable to widen operation");
1121 PartwordMaskValues PMV =
1125 Value *ValOperand_Shifted =
1127 PMV.ShiftAmt,
"ValOperand_Shifted");
1133 Builder.
CreateOr(ValOperand_Shifted, PMV.Inv_Mask,
"AndOperand");
1135 NewOperand = ValOperand_Shifted;
1138 Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
1149bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
1191 ReplacementIRBuilder Builder(CI, *
DL);
1202 std::prev(BB->
end())->eraseFromParent();
1205 PartwordMaskValues PMV =
1210 Value *NewVal_Shifted =
1212 Value *Cmp_Shifted =
1217 LoadInst *InitLoaded = Builder.
CreateLoad(PMV.WordType, PMV.AlignedAddr);
1219 Value *InitLoaded_MaskOut = Builder.
CreateAnd(InitLoaded, PMV.Inv_Mask);
1224 PHINode *Loaded_MaskOut = Builder.
CreatePHI(PMV.WordType, 2);
1225 Loaded_MaskOut->
addIncoming(InitLoaded_MaskOut, BB);
1228 Value *FullWord_NewVal = Builder.
CreateOr(Loaded_MaskOut, NewVal_Shifted);
1229 Value *FullWord_Cmp = Builder.
CreateOr(Loaded_MaskOut, Cmp_Shifted);
1231 PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
1259 Loaded_MaskOut->
addIncoming(OldVal_MaskOut, FailureBB);
1274void AtomicExpandImpl::expandAtomicOpToLLSC(
1275 Instruction *
I,
Type *ResultType,
Value *Addr, Align AddrAlign,
1277 function_ref<
Value *(IRBuilderBase &,
Value *)> PerformOp) {
1278 ReplacementIRBuilder Builder(
I, *
DL);
1279 Value *
Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
1280 MemOpOrder, PerformOp);
1282 I->replaceAllUsesWith(Loaded);
1283 I->eraseFromParent();
1286void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
1287 ReplacementIRBuilder Builder(AI, *
DL);
1289 PartwordMaskValues PMV =
1299 CastOp = Instruction::SExt;
1303 PMV.ShiftAmt,
"ValOperand_Shifted");
1305 Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
1312void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
1313 AtomicCmpXchgInst *CI) {
1314 ReplacementIRBuilder Builder(CI, *
DL);
1327 Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
1333 CmpVal_Shifted, Builder.
CreateAnd(OldVal, PMV.Mask),
"Success");
1340Value *AtomicExpandImpl::insertRMWLLSCLoop(
1341 IRBuilderBase &Builder,
Type *ResultTy,
Value *Addr, Align AddrAlign,
1343 function_ref<
Value *(IRBuilderBase &,
Value *)> PerformOp) {
1348 assert(AddrAlign >=
F->getDataLayout().getTypeStoreSize(ResultTy) &&
1349 "Expected at least natural alignment at this point.");
1369 std::prev(BB->
end())->eraseFromParent();
1377 Value *NewVal = PerformOp(Builder, Loaded);
1379 Value *StoreSuccess =
1401AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
1404 M->getDataLayout());
1406 ReplacementIRBuilder Builder(CI, *
DL);
1418 LLVM_DEBUG(
dbgs() <<
"Replaced " << *CI <<
" with " << *NewCI <<
"\n");
1434bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
1440 LLVMContext &Ctx =
F->getContext();
1447 ? AtomicOrdering::Monotonic
1459 bool HasReleasedLoadBB = !CI->
isWeak() && ShouldInsertFencesForAtomic &&
1460 SuccessOrder != AtomicOrdering::Monotonic &&
1461 SuccessOrder != AtomicOrdering::Acquire &&
1466 bool UseUnconditionalReleaseBarrier =
F->hasMinSize() && !CI->
isWeak();
1520 auto ReleasedLoadBB =
1524 auto ReleasingStoreBB =
1528 ReplacementIRBuilder Builder(CI, *
DL);
1533 std::prev(BB->
end())->eraseFromParent();
1535 if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
1538 PartwordMaskValues PMV =
1545 Value *UnreleasedLoad =
1546 TLI->
emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
1547 Value *UnreleasedLoadExtract =
1554 Builder.
CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB,
1555 MDBuilder(
F->getContext()).createLikelyBranchWeights());
1558 if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
1563 PHINode *LoadedTryStore =
1564 Builder.
CreatePHI(PMV.WordType, 2,
"loaded.trystore");
1565 LoadedTryStore->
addIncoming(UnreleasedLoad, ReleasingStoreBB);
1566 Value *NewValueInsert =
1569 PMV.AlignedAddr, MemOpOrder);
1571 StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0),
"success");
1572 BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
1574 CI->
isWeak() ? FailureBB : RetryBB,
1575 MDBuilder(
F->getContext()).createLikelyBranchWeights());
1579 if (HasReleasedLoadBB) {
1581 TLI->
emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
1589 ShouldStore, TryStoreBB, NoStoreBB,
1590 MDBuilder(
F->getContext()).createLikelyBranchWeights());
1592 LoadedTryStore->
addIncoming(SecondLoad, ReleasedLoadBB);
1599 if (ShouldInsertFencesForAtomic ||
1605 PHINode *LoadedNoStore =
1607 LoadedNoStore->
addIncoming(UnreleasedLoad, StartBB);
1608 if (HasReleasedLoadBB)
1609 LoadedNoStore->
addIncoming(SecondLoad, ReleasedLoadBB);
1618 PHINode *LoadedFailure =
1620 LoadedFailure->
addIncoming(LoadedNoStore, NoStoreBB);
1622 LoadedFailure->
addIncoming(LoadedTryStore, TryStoreBB);
1623 if (ShouldInsertFencesForAtomic)
1632 PHINode *LoadedExit =
1634 LoadedExit->
addIncoming(LoadedTryStore, SuccessBB);
1635 LoadedExit->
addIncoming(LoadedFailure, FailureBB);
1642 Value *LoadedFull = LoadedExit;
1650 for (
auto *User : CI->
users()) {
1656 "weird extraction from { iN, i1 }");
1667 for (
auto *EV : PrunedInsts)
1684bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
1697 return C->isMinusOne();
1699 return C->isMaxValue(
true);
1701 return C->isMinValue(
true);
1703 return C->isMaxValue(
false);
1705 return C->isMinValue(
false);
1711bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
1713 tryExpandAtomicLoad(ResultingLoad);
1719Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
1720 IRBuilderBase &Builder,
Type *ResultTy,
Value *Addr, Align AddrAlign,
1722 function_ref<
Value *(IRBuilderBase &,
Value *)> PerformOp,
1723 CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
1750 std::prev(BB->
end())->eraseFromParent();
1761 Loaded->addIncoming(InitLoaded, BB);
1763 Value *NewVal = PerformOp(Builder, Loaded);
1765 Value *NewLoaded =
nullptr;
1768 CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
1769 MemOpOrder == AtomicOrdering::Unordered
1770 ? AtomicOrdering::Monotonic
1772 SSID,
Success, NewLoaded, MetadataSrc);
1775 Loaded->addIncoming(NewLoaded, LoopBB);
1788bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
1795 case TargetLoweringBase::AtomicExpansionKind::None:
1796 if (ValueSize < MinCASSize)
1797 return expandPartwordCmpXchg(CI);
1799 case TargetLoweringBase::AtomicExpansionKind::LLSC: {
1800 return expandAtomicCmpXchg(CI);
1802 case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
1803 expandAtomicCmpXchgToMaskedIntrinsic(CI);
1805 case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
1807 case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
1814bool AtomicExpandImpl::expandAtomicRMWToCmpXchg(
1815 AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg) {
1822 Value *
Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
1825 [&](IRBuilderBase &Builder,
Value *Loaded) {
1826 return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
1827 AI->getValOperand());
1850 unsigned LargestSize =
DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
1851 return Alignment >=
Size &&
1853 Size <= LargestSize;
1856void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *
I) {
1857 static const RTLIB::Libcall Libcalls[6] = {
1858 RTLIB::ATOMIC_LOAD, RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
1859 RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
1862 bool Expanded = expandAtomicOpToLibcall(
1863 I,
Size,
I->getAlign(),
I->getPointerOperand(),
nullptr,
nullptr,
1864 I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1866 handleUnsupportedAtomicSize(
I,
"atomic load");
1869void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *
I) {
1870 static const RTLIB::Libcall Libcalls[6] = {
1871 RTLIB::ATOMIC_STORE, RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
1872 RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
1875 bool Expanded = expandAtomicOpToLibcall(
1876 I,
Size,
I->getAlign(),
I->getPointerOperand(),
I->getValueOperand(),
1877 nullptr,
I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1879 handleUnsupportedAtomicSize(
I,
"atomic store");
1882void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *
I,
1883 const Twine &AtomicOpName,
1884 Instruction *DiagnosticInst) {
1885 static const RTLIB::Libcall Libcalls[6] = {
1886 RTLIB::ATOMIC_COMPARE_EXCHANGE, RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
1887 RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
1888 RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
1891 bool Expanded = expandAtomicOpToLibcall(
1892 I,
Size,
I->getAlign(),
I->getPointerOperand(),
I->getNewValOperand(),
1893 I->getCompareOperand(),
I->getSuccessOrdering(),
I->getFailureOrdering(),
1896 handleUnsupportedAtomicSize(
I, AtomicOpName, DiagnosticInst);
1900 static const RTLIB::Libcall LibcallsXchg[6] = {
1901 RTLIB::ATOMIC_EXCHANGE, RTLIB::ATOMIC_EXCHANGE_1,
1902 RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
1903 RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
1904 static const RTLIB::Libcall LibcallsAdd[6] = {
1905 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_ADD_1,
1906 RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
1907 RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
1908 static const RTLIB::Libcall LibcallsSub[6] = {
1909 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_SUB_1,
1910 RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
1911 RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
1912 static const RTLIB::Libcall LibcallsAnd[6] = {
1913 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_AND_1,
1914 RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
1915 RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
1916 static const RTLIB::Libcall LibcallsOr[6] = {
1917 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_OR_1,
1918 RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
1919 RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
1920 static const RTLIB::Libcall LibcallsXor[6] = {
1921 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_XOR_1,
1922 RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
1923 RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
1924 static const RTLIB::Libcall LibcallsNand[6] = {
1925 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_NAND_1,
1926 RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
1927 RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};
1968void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *
I) {
1974 if (!Libcalls.
empty())
1975 Success = expandAtomicOpToLibcall(
1976 I,
Size,
I->getAlign(),
I->getPointerOperand(),
I->getValOperand(),
1977 nullptr,
I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1984 expandAtomicRMWToCmpXchg(
1985 I, [
this,
I](IRBuilderBase &Builder,
Value *Addr,
Value *Loaded,
1988 Instruction *MetadataSrc) {
1991 Addr, Loaded, NewVal, Alignment, MemOpOrder,
2000 expandAtomicCASToLibcall(
2014bool AtomicExpandImpl::expandAtomicOpToLibcall(
2015 Instruction *
I,
unsigned Size, Align Alignment,
Value *PointerOperand,
2020 LLVMContext &Ctx =
I->getContext();
2022 const DataLayout &
DL =
M->getDataLayout();
2024 IRBuilder<> AllocaBuilder(&
I->getFunction()->getEntryBlock().front());
2027 Type *SizedIntTy = Type::getIntNTy(Ctx,
Size * 8);
2029 if (
M->getTargetTriple().isOSWindows() &&
M->getTargetTriple().isX86_64() &&
2039 const Align AllocaAlignment =
DL.getPrefTypeAlign(SizedIntTy);
2043 assert(Ordering != AtomicOrdering::NotAtomic &&
"expect atomic MO");
2045 ConstantInt::get(Type::getInt32Ty(Ctx), (
int)
toCABI(Ordering));
2048 assert(Ordering2 != AtomicOrdering::NotAtomic &&
"expect atomic MO");
2050 ConstantInt::get(Type::getInt32Ty(Ctx), (
int)
toCABI(Ordering2));
2052 bool HasResult =
I->getType() != Type::getVoidTy(Ctx);
2054 RTLIB::Libcall RTLibType;
2055 if (UseSizedLibcall) {
2058 RTLibType = Libcalls[1];
2061 RTLibType = Libcalls[2];
2064 RTLibType = Libcalls[3];
2067 RTLibType = Libcalls[4];
2070 RTLibType = Libcalls[5];
2073 }
else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
2074 RTLibType = Libcalls[0];
2081 RTLIB::LibcallImpl LibcallImpl = LibcallLowering->
getLibcallImpl(RTLibType);
2082 if (LibcallImpl == RTLIB::Unsupported) {
2113 AllocaInst *AllocaCASExpected =
nullptr;
2114 AllocaInst *AllocaValue =
nullptr;
2115 AllocaInst *AllocaResult =
nullptr;
2122 if (!UseSizedLibcall) {
2124 Args.push_back(ConstantInt::get(
DL.getIntPtrType(Ctx),
Size));
2132 Value *PtrVal = PointerOperand;
2134 Args.push_back(PtrVal);
2138 AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->
getType());
2142 Args.push_back(AllocaCASExpected);
2147 if (UseSizedLibcall) {
2150 Args.push_back(IntValue);
2152 AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->
getType());
2156 Args.push_back(AllocaValue);
2161 if (!CASExpected && HasResult && !UseSizedLibcall) {
2162 AllocaResult = AllocaBuilder.CreateAlloca(
I->getType());
2165 Args.push_back(AllocaResult);
2169 Args.push_back(OrderingVal);
2173 Args.push_back(Ordering2Val);
2177 ResultTy = Type::getInt1Ty(Ctx);
2178 Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
2179 }
else if (HasResult && UseSizedLibcall)
2180 ResultTy = SizedIntTy;
2182 ResultTy = Type::getVoidTy(Ctx);
2186 for (
Value *Arg : Args)
2188 FunctionType *FnType = FunctionType::get(ResultTy, ArgTys,
false);
2189 FunctionCallee LibcallFn =
M->getOrInsertFunction(
2197 if (ValueOperand && !UseSizedLibcall)
2203 Type *FinalResultTy =
I->getType();
2206 CASExpected->
getType(), AllocaCASExpected, AllocaAlignment);
2211 }
else if (HasResult) {
2213 if (UseSizedLibcall)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static Value * performMaskedAtomicOp(AtomicRMWInst::BinOp Op, IRBuilderBase &Builder, Value *Loaded, Value *Shifted_Inc, Value *Inc, const PartwordMaskValues &PMV)
Emit IR to implement a masked version of a given atomicrmw operation.
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder, Instruction *I, Type *ValueType, Value *Addr, Align AddrAlign, unsigned MinWordSize)
This is a helper function which builds instructions to provide values necessary for partword atomic o...
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment, const DataLayout &DL)
static Value * extractMaskedValue(IRBuilderBase &Builder, Value *WideWord, const PartwordMaskValues &PMV)
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr, Value *Loaded, Value *NewVal, Align AddrAlign, AtomicOrdering MemOpOrder, SyncScope::ID SSID, Value *&Success, Value *&NewLoaded, Instruction *MetadataSrc)
Expand Atomic static false unsigned getAtomicOpSize(LoadInst *LI)
static void writeUnsupportedAtomicSizeReason(const TargetLowering *TLI, Inst *I, raw_ostream &OS)
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I)
static Value * insertMaskedValue(IRBuilderBase &Builder, Value *WideWord, Value *Updated, const PartwordMaskValues &PMV)
static void copyMetadataForAtomic(Instruction &Dest, const Instruction &Source)
Copy metadata that's safe to preserve when widening atomics.
static ArrayRef< RTLIB::Libcall > GetRMWLibcall(AtomicRMWInst::BinOp Op)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool runOnFunction(Function &F, bool PostInlining)
Module.h This file contains the declarations for the Module class.
static bool isIdempotentRMW(AtomicRMWInst &RMWI)
Return true if and only if the given instruction does not modify the memory location referenced.
Machine Check Debug Module
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file contains the declarations for profiling metadata utility functions.
This file defines the SmallString class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
void setAlignment(Align Align)
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getNewValOperand()
AtomicOrdering getMergedOrdering() const
Returns a single ordering which is at least as strong as both the success and failure orderings for t...
void setWeak(bool IsWeak)
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Value * getPointerOperand()
static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering)
Returns the strongest permitted ordering on failure, given the desired ordering on success.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
An instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
Value * getPointerOperand()
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
iterator begin()
Instruction iterator methods.
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
reverse_iterator rbegin()
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
InstListType::reverse_iterator reverse_iterator
void setAttributes(AttributeList A)
Set the attributes for this call.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionPass class - This class is used to implement most global optimizations.
BasicBlockListType::iterator iterator
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Common base class shared among various IRBuilders.
AtomicCmpXchgInst * CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID=SyncScope::System)
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
LLVM_ABI CallInst * CreateLifetimeStart(Value *Ptr)
Create a lifetime.start intrinsic.
LLVM_ABI CallInst * CreateLifetimeEnd(Value *Ptr)
Create a lifetime.end intrinsic.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
UnreachableInst * CreateUnreachable()
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
BasicBlock * GetInsertBlock() const
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
UncondBrInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
void setIsFPConstrained(bool IsCon)
Enable/Disable use of constrained floating point math.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void moveAfter(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
LLVM_ABI void getSyncScopeNames(SmallVectorImpl< StringRef > &SSNs) const
getSyncScopeNames - Populates client supplied SmallVector with synchronization scope names registered...
Tracks which library functions to use for a particular subtarget.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
Record a mapping from subtarget to LibcallLoweringInfo.
const LibcallLoweringInfo & getLibcallLowering(const TargetSubtargetInfo &Subtarget) const
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
A Module instance is used to store all the information related to an LLVM module.
LLVMContext & getContext() const
Get the global data context.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
virtual Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform a bit test atomicrmw using a target-specific intrinsic.
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
virtual bool shouldInsertFencesForAtomic(const Instruction *I) const
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
virtual AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const
virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const
Perform a cmpxchg expansion using a target-specific method.
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
virtual Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const
Perform a masked atomicrmw using a target-specific intrinsic.
virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
virtual Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
Perform an atomicrmw expansion in a target-specific way.
Perform a atomicrmw expansion using a target-specific way.
virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const
Perform an atomic store in a target-specific way.
Perform a atomic store using a target-specific way.
virtual AtomicExpansionKind shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const
Returns how the given atomic atomicrmw should be cast by the IR-level AtomicExpand pass.
virtual bool shouldInsertTrailingSeqCstFenceForAtomicStore(const Instruction *I) const
Whether AtomicExpandPass should automatically insert a seq_cst trailing fence without reducing the or...
virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
virtual Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const
Perform a masked cmpxchg using a target-specific intrinsic.
unsigned getMaxAtomicSizeInBitsSupported() const
Returns the maximum atomic operation size (in bits) supported by the backend.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Perform an atomic load in a target-specific way.
Perform a atomic load using a target-specific way.
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass.
virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform an atomicrmw whose result is only used by comparison, using a target-specific intrinsic.
virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
virtual LoadInst * lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can b...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
Target-Independent Code Generator Pass Configuration Options.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
bool isReleaseOrStronger(AtomicOrdering AO)
AtomicOrderingCABI toCABI(AtomicOrdering AO)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
Value * buildAtomicRMWValue(AtomicRMWInst::BinOp Op, IRBuilderBase &Builder, Value *Loaded, Value *Val)
Emit IR to implement the given atomicrmw operation on values in registers, returning the new value.
AtomicOrdering
Atomic ordering for LLVM's memory model.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool lowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI)
Convert the given Cmpxchg into primitive load and compare.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool lowerAtomicRMWInst(AtomicRMWInst *RMWI)
Convert the given RMWI into primitive loads and stores, assuming that doing so is legal.
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
LLVM_ABI char & AtomicExpandID
AtomicExpandID – Lowers atomic operations in terms of either cmpxchg load-linked/store-conditional lo...
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.