#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;
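// AtomicInfo gathers the size and alignment facts for an atomic l-value
// (simple, bit-field, vector element, or ext-vector element) and decides
// whether operations on it can be inlined or must be lowered to the
// __atomic_* libcalls.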
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;

        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),

        ValueTy = ATy->getValueType();

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        ValueSizeInBits = C.getTypeSize(ValueTy);

        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)

        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();

        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());

            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");

        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;

        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        ValueSizeInBits = C.getTypeSize(ValueTy);

        AtomicSizeInBits = C.getTypeSize(AtomicTy);

        ValueSizeInBits = C.getTypeSize(ValueTy);

        AtomicSizeInBits = C.getTypeSize(AtomicTy);

      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }

    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer();
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;
    void emitCopyIntoMemory(RValue rvalue) const;

    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getAlignmentSource(), LVal.getTBAAInfo());
    }
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    Address CreateTempAlloca() const;
    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);

    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);

    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                 const llvm::function_ref<RValue(RValue)> &UpdateOp,
                                 bool IsVolatile);
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
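// Create a temporary alloca wide and aligned enough to hold the full atomic
// representation; for wide bit-fields this may be the value type instead.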
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}
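// Does a store of the given IR type modify the full expected width?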
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
  if (hasPadding()) return true;

  switch (getEvaluationKind()) {
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}
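// Emit a plain cmpxchg instruction for the given operands and orderings; on
// failure, the observed value is stored back into the expected slot.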
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  // Store the observed value back into the "expected" slot on failure.
  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  CGF.Builder.CreateStore(Old, Val1);

  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
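// Given an ordering required on success, emit all possible cmpxchg
// instructions to cope with the provided (but possibly only dynamically
// known) failure ordering.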
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      llvm::AtomicOrdering::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
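// Lower a single AtomicExpr opcode at one fixed LLVM ordering to a load,
// store, cmpxchg, or atomicrmw; the *_fetch forms re-apply PostOp to the
// fetched value to produce their result.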
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order);
    } else {
      // Create all the relevant BB's.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("atomic.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
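// AddDirectArgument passes Val directly for the sized, optimized libcalls and
// through a coerced temporary for the generic ones.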
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                              bool UseOptimizedLibcall, llvm::Value *Val,
                              QualType ValTy, SourceLocation Loc,
                              CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
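// EmitAtomicExpr is the main entry point: it computes the operand layout,
// decides between inline instructions and __atomic_* libcalls, and
// dispatches on the (possibly only dynamically known) memory order.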
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();

  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (sizeChars != alignChars ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:

  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
  bool UseOptimizedLibcall = false;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
    // For these, only library calls for certain sizes exist.
    UseOptimizedLibcall = true;
    break;

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    // Only use optimized library calls for sizes for which they exist.
    if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
      UseOptimizedLibcall = true;
    break;
  }
  if (!UseOptimizedLibcall) {

  std::string LibCallName;

  bool HaveRetTy = false;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    LibCallName = "__atomic_compare_exchange";

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    LibCallName = "__atomic_exchange";

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    LibCallName = "__atomic_store";

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    LibCallName = "__atomic_load";

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    LibCallName = "__atomic_fetch_add";

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    LibCallName = "__atomic_fetch_and";

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    LibCallName = "__atomic_fetch_or";

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    LibCallName = "__atomic_fetch_sub";

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    LibCallName = "__atomic_fetch_xor";

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    LibCallName = "__atomic_fetch_nand";
  }

  // Optimized functions have the size in their name.
  if (UseOptimizedLibcall)
    LibCallName += "_" + llvm::utostr(Size);
  if (UseOptimizedLibcall) {

  assert(UseOptimizedLibcall || !PostOp);

    llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
    ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);

  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    ResVal = Builder.CreateNot(ResVal);
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
  // Long case, when Order isn't obviously constant: create all the relevant
  // blocks and switch on the dynamic ordering value.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, llvm::AtomicOrdering::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, llvm::AtomicOrdering::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
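// Cast an address to the integer type wide enough to cover the whole atomic
// object, preserving the address space.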
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
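// If the operand is narrower than the atomic width, copy it into a temporary
// of the full width before reinterpreting it as the atomic int type.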
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  auto *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {

    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }

  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getAlignmentSource()));
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getAlignmentSource()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getAlignmentSource()));
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary big enough to hold the atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
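// Issue the __atomic_load libcall, reading the atomic object into the given
// temporary.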
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
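// An LValue is a candidate for having its loads and stores be made atomic if
// we are operating under /volatile:ms and the LValue itself is volatile and
// such an operation can be performed without a libcall.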
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}
  llvm::AtomicOrdering AO;
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
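// The CodeGenFunction-level wrapper just wraps the source l-value in an
// AtomicInfo and forwards the requested ordering.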
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
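// Copy an r-value into memory as part of storing to an atomic type, zeroing
// any padding first so the full width has a defined bit pattern.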
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());

  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(), rvalue.getAggregateAddress(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()));
    return;
  }

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();
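// Materialize an r-value into a temporary with the atomic object's type and
// alignment.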
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }

  // Otherwise, we need to go through memory.
  Address Addr = materializeRValue(RVal);

  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
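// Inline compare-exchange on the atomic-int representation; returns the
// observed value and the success flag.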
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  CallArgList Args;

  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
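// Emit a compare-and-exchange op for atomic type.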
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior: "failure argument shall be no
    // stronger than the success argument".
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce the source addresses.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}
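// EmitAtomicUpdateValue applies UpdateOp to the old value and writes the
// result into DesiredAddr, preserving any bits outside the projected value.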
  LValue AtomicLVal = Atomics.getAtomicLValue();

  Address Ptr = Atomics.materializeRValue(OldRVal);

  RValue NewRVal = UpdateOp(UpRVal);
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(), AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(), AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
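// Dispatch an atomic read-modify-write to the libcall or the inline
// cmpxchg-loop path.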
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
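// Emit a store to an l-value of atomic type: simple l-values take the fast
// path (plain copy for init, libcall, or native atomic store); non-simple
// l-values fall back to an atomic update loop.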
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
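// EmitAtomicInit: initialization of an _Atomic object needs no memory
// ordering; it only has to respect the in-memory atomic layout.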
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {

    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

  llvm_unreachable("bad evaluation kind");
}