182#include "llvm/IR/IntrinsicsX86.h"
211#define DEBUG_TYPE "msan"
214 "Controls which checks to insert");
217 "Controls which instruction to instrument");
235 "msan-track-origins",
240 cl::desc(
"keep going after reporting a UMR"),
249 "msan-poison-stack-with-call",
254 "msan-poison-stack-pattern",
255 cl::desc(
"poison uninitialized stack variables with the given pattern"),
260 cl::desc(
"Print name of local stack variable"),
269 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
274 cl::desc(
"exact handling of relational integer ICmp"),
278 "msan-handle-lifetime-intrinsics",
280 "when possible, poison scoped variables at the beginning of the scope "
281 "(slower, but more precise)"),
292 "msan-handle-asm-conservative",
303 "msan-check-access-address",
304 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
309 cl::desc(
"check arguments and return values at function call boundaries"),
313 "msan-dump-strict-instructions",
314 cl::desc(
"print out instructions with default strict semantics"),
318 "msan-instrumentation-with-call-threshold",
320 "If the function being instrumented requires more than "
321 "this number of checks and origin stores, use callbacks instead of "
322 "inline checks (-1 means never use callbacks)."),
327 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
337 cl::desc(
"Insert checks for constant shadow values"),
344 cl::desc(
"Place MSan constructors in comdat sections"),
350 cl::desc(
"Define custom MSan AndMask"),
354 cl::desc(
"Define custom MSan XorMask"),
358 cl::desc(
"Define custom MSan ShadowBase"),
362 cl::desc(
"Define custom MSan OriginBase"),
367 cl::desc(
"Define threshold for number of checks per "
368 "debug location to force origin update."),
380struct MemoryMapParams {
387struct PlatformMemoryMapParams {
388 const MemoryMapParams *bits32;
389 const MemoryMapParams *bits64;
535class MemorySanitizer {
544 MemorySanitizer(MemorySanitizer &&) =
delete;
545 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
546 MemorySanitizer(
const MemorySanitizer &) =
delete;
547 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
552 friend struct MemorySanitizerVisitor;
553 friend struct VarArgHelperBase;
554 friend struct VarArgAMD64Helper;
555 friend struct VarArgMIPS64Helper;
556 friend struct VarArgAArch64Helper;
557 friend struct VarArgPowerPC64Helper;
558 friend struct VarArgSystemZHelper;
560 void initializeModule(
Module &M);
565 template <
typename... ArgsTy>
592 Value *ParamOriginTLS;
598 Value *RetvalOriginTLS;
604 Value *VAArgOriginTLS;
607 Value *VAArgOverflowSizeTLS;
610 bool CallbacksInitialized =
false;
655 Value *MsanMetadataAlloca;
661 const MemoryMapParams *MapParams;
665 MemoryMapParams CustomMapParams;
670 MDNode *OriginStoreWeights;
673void insertModuleCtor(
Module &M) {
701 Recover(getOptOrDefault(
ClKeepGoing, Kernel || R)),
716 MemorySanitizer Msan(*
F.getParent(),
Options);
735 OS, MapClassName2PassName);
742 OS <<
"eager-checks;";
743 OS <<
"track-origins=" <<
Options.TrackOrigins;
759template <
typename... ArgsTy>
767 std::forward<ArgsTy>(Args)...);
770 return M.getOrInsertFunction(
Name, MsanMetadata,
771 std::forward<ArgsTy>(Args)...);
780 RetvalOriginTLS =
nullptr;
782 ParamOriginTLS =
nullptr;
784 VAArgOriginTLS =
nullptr;
785 VAArgOverflowSizeTLS =
nullptr;
787 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
789 IRB.getVoidTy(), IRB.getInt32Ty());
800 MsanGetContextStateFn =
M.getOrInsertFunction(
806 for (
int ind = 0, size = 1; ind < 4; ind++,
size <<= 1) {
807 std::string name_load =
808 "__msan_metadata_ptr_for_load_" + std::to_string(size);
809 std::string name_store =
810 "__msan_metadata_ptr_for_store_" + std::to_string(size);
811 MsanMetadataPtrForLoad_1_8[ind] = getOrInsertMsanMetadataFunction(
813 MsanMetadataPtrForStore_1_8[ind] = getOrInsertMsanMetadataFunction(
817 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
820 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
821 M,
"__msan_metadata_ptr_for_store_n",
825 MsanPoisonAllocaFn =
M.getOrInsertFunction(
826 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
827 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
828 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
832 return M.getOrInsertGlobal(
Name, Ty, [&] {
834 nullptr,
Name,
nullptr,
847 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
848 :
"__msan_warning_with_origin_noreturn";
849 WarningFn =
M.getOrInsertFunction(WarningFnName,
851 IRB.getVoidTy(), IRB.getInt32Ty());
854 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
855 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
881 VAArgOverflowSizeTLS =
886 unsigned AccessSize = 1 << AccessSizeIndex;
887 std::string FunctionName =
"__msan_maybe_warning_" + itostr(AccessSize);
888 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
890 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
892 FunctionName =
"__msan_maybe_store_origin_" + itostr(AccessSize);
893 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
895 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
899 MsanSetAllocaOriginWithDescriptionFn =
900 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
901 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
902 MsanSetAllocaOriginNoDescriptionFn =
903 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
904 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
905 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
906 IRB.getVoidTy(), PtrTy, IntptrTy);
912 if (CallbacksInitialized)
918 MsanChainOriginFn =
M.getOrInsertFunction(
919 "__msan_chain_origin",
922 MsanSetOriginFn =
M.getOrInsertFunction(
924 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
926 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
928 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
929 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
931 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
933 MsanInstrumentAsmStoreFn =
934 M.getOrInsertFunction(
"__msan_instrument_asm_store", IRB.getVoidTy(),
938 createKernelApi(M, TLI);
940 createUserspaceApi(M, TLI);
942 CallbacksInitialized =
true;
948 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
966void MemorySanitizer::initializeModule(
Module &M) {
967 auto &
DL =
M.getDataLayout();
969 TargetTriple =
Triple(
M.getTargetTriple());
971 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
972 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
974 if (ShadowPassed || OriginPassed) {
979 MapParams = &CustomMapParams;
981 switch (TargetTriple.getOS()) {
983 switch (TargetTriple.getArch()) {
998 switch (TargetTriple.getArch()) {
1007 switch (TargetTriple.getArch()) {
1041 C = &(
M.getContext());
1043 IntptrTy = IRB.getIntPtrTy(
DL);
1044 OriginTy = IRB.getInt32Ty();
1045 PtrTy = IRB.getPtrTy();
1050 if (!CompileKernel) {
1052 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1053 return new GlobalVariable(
1054 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1055 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1059 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1060 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1061 GlobalValue::WeakODRLinkage,
1062 IRB.getInt32(Recover),
"__msan_keep_going");
1077struct VarArgHelper {
1078 virtual ~VarArgHelper() =
default;
1093 virtual void finalizeInstrumentation() = 0;
1096struct MemorySanitizerVisitor;
1101 MemorySanitizerVisitor &Visitor);
1108 if (TypeSizeFixed <= 8)
1117class NextNodeIRBuilder :
public IRBuilder<> {
1130struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1132 MemorySanitizer &MS;
1135 std::unique_ptr<VarArgHelper> VAHelper;
1142 bool PropagateShadow;
1146 struct ShadowOriginAndInsertPoint {
1152 : Shadow(S), Origin(
O), OrigIns(
I) {}
1160 int64_t SplittableBlocksCount = 0;
1162 MemorySanitizerVisitor(
Function &
F, MemorySanitizer &MS,
1165 bool SanitizeFunction =
1167 InsertChecks = SanitizeFunction;
1168 PropagateShadow = SanitizeFunction;
1178 MS.initializeCallbacks(*
F.getParent(), TLI);
1179 FnPrologueEnd =
IRBuilder<>(
F.getEntryBlock().getFirstNonPHI())
1182 if (MS.CompileKernel) {
1184 insertKmsanPrologue(IRB);
1188 <<
"MemorySanitizer is not inserting checks into '"
1189 <<
F.getName() <<
"'\n");
1192 bool instrumentWithCalls(
Value *V) {
1194 if (isa<Constant>(V))
1197 ++SplittableBlocksCount;
1203 return I.getParent() == FnPrologueEnd->
getParent() &&
1204 (&
I == FnPrologueEnd ||
I.comesBefore(FnPrologueEnd));
1212 if (MS.TrackOrigins <= 1)
1214 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1219 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1231 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1232 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1242 auto [InsertPt,
Index] =
1254 Align CurrentAlignment = Alignment;
1255 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1256 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1257 Value *IntptrOriginPtr =
1259 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1264 CurrentAlignment = IntptrAlignment;
1281 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1282 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1290 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1299 if (instrumentWithCalls(ConvertedShadow) &&
1302 Value *ConvertedShadow2 =
1308 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1312 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1317 void materializeStores() {
1320 Value *Val =
SI->getValueOperand();
1322 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1323 Value *ShadowPtr, *OriginPtr;
1325 const Align Alignment =
SI->getAlign();
1327 std::tie(ShadowPtr, OriginPtr) =
1328 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
true);
1337 if (MS.TrackOrigins && !
SI->isAtomic())
1338 storeOrigin(IRB,
Addr, Shadow, getOrigin(Val), OriginPtr,
1345 if (MS.TrackOrigins < 2)
1348 if (LazyWarningDebugLocationCount.
empty())
1349 for (
const auto &
I : InstrumentationList)
1350 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1364 if (
Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1366 auto NewDebugLoc = OI->getDebugLoc();
1373 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1374 Origin = updateOrigin(Origin, IRBOrigin);
1379 if (MS.CompileKernel || MS.TrackOrigins)
1393 if (instrumentWithCalls(ConvertedShadow) &&
1396 Value *ConvertedShadow2 =
1399 Fn, {ConvertedShadow2,
1400 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1404 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1407 !MS.Recover, MS.ColdCallWeights);
1410 insertWarningFn(IRB, Origin);
1415 void materializeInstructionChecks(
1420 bool Combine = !MS.TrackOrigins;
1422 Value *Shadow =
nullptr;
1423 for (
const auto &ShadowData : InstructionChecks) {
1427 Value *ConvertedShadow = ShadowData.Shadow;
1429 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1436 insertWarningFn(IRB, ShadowData.Origin);
1446 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1451 Shadow = ConvertedShadow;
1455 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1456 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1457 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1463 materializeOneCheck(IRB, Shadow,
nullptr);
1467 void materializeChecks() {
1473 for (
auto I = InstrumentationList.begin();
1474 I != InstrumentationList.end();) {
1475 auto OrigIns =
I->OrigIns;
1479 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1480 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1481 return OrigIns != R.OrigIns;
1495 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1496 {Zero, IRB.getInt32(0)},
"param_shadow");
1497 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1498 {Zero, IRB.getInt32(1)},
"retval_shadow");
1499 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1500 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1501 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1502 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1503 MS.VAArgOverflowSizeTLS =
1504 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1505 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1506 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1507 {Zero, IRB.getInt32(5)},
"param_origin");
1508 MS.RetvalOriginTLS =
1509 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1510 {Zero, IRB.getInt32(6)},
"retval_origin");
1512 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1524 for (
PHINode *PN : ShadowPHINodes) {
1525 PHINode *PNS = cast<PHINode>(getShadow(PN));
1526 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1527 size_t NumValues = PN->getNumIncomingValues();
1528 for (
size_t v = 0;
v < NumValues;
v++) {
1529 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1531 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1535 VAHelper->finalizeInstrumentation();
1539 if (InstrumentLifetimeStart) {
1540 for (
auto Item : LifetimeStartList) {
1541 instrumentAlloca(*Item.second, Item.first);
1542 AllocaSet.
remove(Item.second);
1548 instrumentAlloca(*AI);
1551 materializeChecks();
1555 materializeStores();
1561 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1573 if (
VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1574 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1576 VT->getElementCount());
1578 if (
ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1579 return ArrayType::get(getShadowTy(AT->getElementType()),
1580 AT->getNumElements());
1582 if (
StructType *ST = dyn_cast<StructType>(OrigTy)) {
1584 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1585 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1587 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1603 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1605 if (Aggregator != FalseVal)
1606 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1608 Aggregator = ShadowBool;
1617 if (!
Array->getNumElements())
1621 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1625 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1626 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1636 return collapseStructShadow(
Struct, V, IRB);
1637 if (
ArrayType *Array = dyn_cast<ArrayType>(
V->getType()))
1638 return collapseArrayShadow(Array, V, IRB);
1639 if (isa<VectorType>(
V->getType())) {
1640 if (isa<ScalableVectorType>(
V->getType()))
1643 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1651 Type *VTy =
V->getType();
1653 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1660 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1661 if (
VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1662 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1663 VectTy->getElementCount());
1669 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1670 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1671 return VectorType::get(
1672 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1673 VectTy->getElementCount());
1675 assert(IntPtrTy == MS.IntptrTy);
1676 return PointerType::get(*MS.C, 0);
1680 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1682 VectTy->getElementCount(), constToIntPtr(VectTy->getElementType(),
C));
1684 assert(IntPtrTy == MS.IntptrTy);
1685 return ConstantInt::get(MS.IntptrTy,
C);
1696 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1699 if (
uint64_t AndMask = MS.MapParams->AndMask)
1700 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1702 if (
uint64_t XorMask = MS.MapParams->XorMask)
1703 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1715 std::pair<Value *, Value *>
1722 assert(VectTy->getElementType()->isPointerTy());
1724 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1725 Value *ShadowOffset = getShadowPtrOffset(
Addr, IRB);
1726 Value *ShadowLong = ShadowOffset;
1727 if (
uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1729 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1732 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1734 Value *OriginPtr =
nullptr;
1735 if (MS.TrackOrigins) {
1736 Value *OriginLong = ShadowOffset;
1737 uint64_t OriginBase = MS.MapParams->OriginBase;
1738 if (OriginBase != 0)
1740 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1743 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1746 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1748 return std::make_pair(ShadowPtr, OriginPtr);
1751 template <
typename... ArgsTy>
1756 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1757 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1760 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1763 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *
Addr,
1767 Value *ShadowOriginPtrs;
1775 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1777 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1778 ShadowOriginPtrs = createMetadataCall(
1780 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1787 return std::make_pair(ShadowPtr, OriginPtr);
1793 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *
Addr,
1800 return getShadowOriginPtrKernelNoVec(
Addr, IRB, ShadowTy,
isStore);
1804 unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1805 Value *ShadowPtrs = ConstantInt::getNullValue(
1807 Value *OriginPtrs =
nullptr;
1808 if (MS.TrackOrigins)
1809 OriginPtrs = ConstantInt::getNullValue(
1811 for (
unsigned i = 0; i < NumElements; ++i) {
1814 auto [ShadowPtr, OriginPtr] =
1815 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1818 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1819 if (MS.TrackOrigins)
1821 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1823 return {ShadowPtrs, OriginPtrs};
1830 if (MS.CompileKernel)
1831 return getShadowOriginPtrKernel(
Addr, IRB, ShadowTy,
isStore);
1832 return getShadowOriginPtrUserspace(
Addr, IRB, ShadowTy, Alignment);
1847 if (!MS.TrackOrigins)
1861 Value *getOriginPtrForRetval() {
1863 return MS.RetvalOriginTLS;
1868 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1869 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1874 if (!MS.TrackOrigins)
1876 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1877 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1878 OriginMap[
V] = Origin;
1882 Type *ShadowTy = getShadowTy(OrigTy);
1892 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
1897 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1899 if (
ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1901 getPoisonedShadow(AT->getElementType()));
1904 if (
StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1906 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1907 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1915 Type *ShadowTy = getShadowTy(V);
1918 return getPoisonedShadow(ShadowTy);
1930 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
1931 return getCleanShadow(V);
1933 Value *Shadow = ShadowMap[
V];
1935 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
1937 assert(Shadow &&
"No shadow for a value");
1941 if (
UndefValue *U = dyn_cast<UndefValue>(V)) {
1942 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1943 : getCleanShadow(V);
1948 if (
Argument *
A = dyn_cast<Argument>(V)) {
1950 Value *&ShadowPtr = ShadowMap[
V];
1955 unsigned ArgOffset = 0;
1957 for (
auto &FArg :
F->args()) {
1958 if (!FArg.getType()->isSized()) {
1963 unsigned Size = FArg.hasByValAttr()
1964 ?
DL.getTypeAllocSize(FArg.getParamByValType())
1965 :
DL.getTypeAllocSize(FArg.getType());
1969 if (FArg.hasByValAttr()) {
1973 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
1974 FArg.getParamAlign(), FArg.getParamByValType());
1975 Value *CpShadowPtr, *CpOriginPtr;
1976 std::tie(CpShadowPtr, CpOriginPtr) =
1977 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1979 if (!PropagateShadow || Overflow) {
1981 EntryIRB.CreateMemSet(
1985 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
1987 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign,
Base,
1992 if (MS.TrackOrigins) {
1994 getOriginPtrForArgument(EntryIRB, ArgOffset);
1998 EntryIRB.CreateMemCpy(
2007 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2008 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2009 ShadowPtr = getCleanShadow(V);
2010 setOrigin(
A, getCleanOrigin());
2013 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2014 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2016 if (MS.TrackOrigins) {
2018 getOriginPtrForArgument(EntryIRB, ArgOffset);
2019 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2023 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2029 assert(ShadowPtr &&
"Could not find shadow for an argument");
2033 return getCleanShadow(V);
2038 return getShadow(
I->getOperand(i));
2043 if (!MS.TrackOrigins)
2045 if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2046 return getCleanOrigin();
2047 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2048 "Unexpected value type in getOrigin()");
2050 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2051 return getCleanOrigin();
2053 Value *Origin = OriginMap[
V];
2054 assert(Origin &&
"Missing origin");
2060 return getOrigin(
I->getOperand(i));
2073 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2074 << *OrigIns <<
"\n");
2079 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2080 isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2081 "Can only insert checks for integer, vector, and aggregate shadow "
2084 InstrumentationList.push_back(
2085 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2094 Value *Shadow, *Origin;
2096 Shadow = getShadow(Val);
2099 Origin = getOrigin(Val);
2101 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2104 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2106 insertShadowCheck(Shadow, Origin, OrigIns);
2111 case AtomicOrdering::NotAtomic:
2112 return AtomicOrdering::NotAtomic;
2113 case AtomicOrdering::Unordered:
2114 case AtomicOrdering::Monotonic:
2115 case AtomicOrdering::Release:
2116 return AtomicOrdering::Release;
2117 case AtomicOrdering::Acquire:
2118 case AtomicOrdering::AcquireRelease:
2119 return AtomicOrdering::AcquireRelease;
2120 case AtomicOrdering::SequentiallyConsistent:
2121 return AtomicOrdering::SequentiallyConsistent;
2127 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2128 uint32_t OrderingTable[NumOrderings] = {};
2130 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2131 OrderingTable[(
int)AtomicOrderingCABI::release] =
2132 (int)AtomicOrderingCABI::release;
2133 OrderingTable[(int)AtomicOrderingCABI::consume] =
2134 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2135 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2136 (
int)AtomicOrderingCABI::acq_rel;
2137 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2138 (
int)AtomicOrderingCABI::seq_cst;
2145 case AtomicOrdering::NotAtomic:
2146 return AtomicOrdering::NotAtomic;
2147 case AtomicOrdering::Unordered:
2148 case AtomicOrdering::Monotonic:
2149 case AtomicOrdering::Acquire:
2150 return AtomicOrdering::Acquire;
2151 case AtomicOrdering::Release:
2152 case AtomicOrdering::AcquireRelease:
2153 return AtomicOrdering::AcquireRelease;
2154 case AtomicOrdering::SequentiallyConsistent:
2155 return AtomicOrdering::SequentiallyConsistent;
2161 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2162 uint32_t OrderingTable[NumOrderings] = {};
2164 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2165 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2166 OrderingTable[(int)AtomicOrderingCABI::consume] =
2167 (
int)AtomicOrderingCABI::acquire;
2168 OrderingTable[(int)AtomicOrderingCABI::release] =
2169 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2170 (int)AtomicOrderingCABI::acq_rel;
2171 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2172 (
int)AtomicOrderingCABI::seq_cst;
2180 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2183 if (isInPrologue(
I))
2188 setShadow(&
I, getCleanShadow(&
I));
2189 setOrigin(&
I, getCleanOrigin());
2200 assert(
I.getType()->isSized() &&
"Load type must have size");
2201 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2202 NextNodeIRBuilder IRB(&
I);
2203 Type *ShadowTy = getShadowTy(&
I);
2205 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2206 const Align Alignment =
I.getAlign();
2207 if (PropagateShadow) {
2208 std::tie(ShadowPtr, OriginPtr) =
2209 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2213 setShadow(&
I, getCleanShadow(&
I));
2217 insertShadowCheck(
I.getPointerOperand(), &
I);
2222 if (MS.TrackOrigins) {
2223 if (PropagateShadow) {
2228 setOrigin(&
I, getCleanOrigin());
2238 StoreList.push_back(&
I);
2240 insertShadowCheck(
I.getPointerOperand(), &
I);
2244 assert(isa<AtomicRMWInst>(
I) || isa<AtomicCmpXchgInst>(
I));
2248 Value *Val =
I.getOperand(1);
2249 Value *ShadowPtr = getShadowOriginPtr(
Addr, IRB, getShadowTy(Val),
Align(1),
2254 insertShadowCheck(
Addr, &
I);
2259 if (isa<AtomicCmpXchgInst>(
I))
2260 insertShadowCheck(Val, &
I);
2264 setShadow(&
I, getCleanShadow(&
I));
2265 setOrigin(&
I, getCleanOrigin());
2280 insertShadowCheck(
I.getOperand(1), &
I);
2284 setOrigin(&
I, getOrigin(&
I, 0));
2288 insertShadowCheck(
I.getOperand(2), &
I);
2290 auto *Shadow0 = getShadow(&
I, 0);
2291 auto *Shadow1 = getShadow(&
I, 1);
2294 setOriginForNaryOp(
I);
2299 auto *Shadow0 = getShadow(&
I, 0);
2300 auto *Shadow1 = getShadow(&
I, 1);
2303 setOriginForNaryOp(
I);
2309 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2310 setOrigin(&
I, getOrigin(&
I, 0));
2315 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2316 setOrigin(&
I, getOrigin(&
I, 0));
2321 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2322 setOrigin(&
I, getOrigin(&
I, 0));
2329 if (
auto *CI = dyn_cast<CallInst>(
I.getOperand(0)))
2330 if (CI->isMustTailCall())
2334 setOrigin(&
I, getOrigin(&
I, 0));
2340 "_msprop_ptrtoint"));
2341 setOrigin(&
I, getOrigin(&
I, 0));
2347 "_msprop_inttoptr"));
2348 setOrigin(&
I, getOrigin(&
I, 0));
2351 void visitFPToSIInst(
CastInst &
I) { handleShadowOr(
I); }
2352 void visitFPToUIInst(
CastInst &
I) { handleShadowOr(
I); }
2353 void visitSIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2354 void visitUIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2355 void visitFPExtInst(
CastInst &
I) { handleShadowOr(
I); }
2356 void visitFPTruncInst(
CastInst &
I) { handleShadowOr(
I); }
2371 Value *S2 = getShadow(&
I, 1);
2372 Value *V1 =
I.getOperand(0);
2381 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2382 setOriginForNaryOp(
I);
2393 Value *S2 = getShadow(&
I, 1);
2403 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2404 setOriginForNaryOp(
I);
2422 template <
bool CombineShadow>
class Combiner {
2423 Value *Shadow =
nullptr;
2424 Value *Origin =
nullptr;
2426 MemorySanitizerVisitor *MSV;
2430 : IRB(IRB), MSV(MSV) {}
2434 if (CombineShadow) {
2439 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2440 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2444 if (MSV->MS.TrackOrigins) {
2449 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2451 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2452 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2462 Value *OpShadow = MSV->getShadow(V);
2463 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2464 return Add(OpShadow, OpOrigin);
2470 if (CombineShadow) {
2472 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2473 MSV->setShadow(
I, Shadow);
2475 if (MSV->MS.TrackOrigins) {
2477 MSV->setOrigin(
I, Origin);
2487 if (!MS.TrackOrigins)
2490 OriginCombiner
OC(
this, IRB);
2491 for (
Use &
Op :
I.operands())
2496 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2498 "Vector of pointers is not a valid shadow type");
2499 return Ty->
isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2508 Type *srcTy =
V->getType();
2509 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2510 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2511 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2517 cast<VectorType>(dstTy)->getElementCount() ==
2518 cast<VectorType>(srcTy)->getElementCount())
2529 Type *ShadowTy = getShadowTy(V);
2530 if (
V->getType() == ShadowTy)
2532 if (
V->getType()->isPtrOrPtrVectorTy())
2541 ShadowAndOriginCombiner
SC(
this, IRB);
2542 for (
Use &
Op :
I.operands())
2562 if (
auto *VTy = dyn_cast<VectorType>(Ty)) {
2563 unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2564 Type *EltTy = VTy->getElementType();
2566 for (
unsigned Idx = 0;
Idx < NumElements; ++
Idx) {
2569 const APInt &
V = Elt->getValue();
2571 Elements.push_back(ConstantInt::get(EltTy, V2));
2573 Elements.push_back(ConstantInt::get(EltTy, 1));
2578 if (
ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2579 const APInt &
V = Elt->getValue();
2581 ShadowMul = ConstantInt::get(Ty, V2);
2583 ShadowMul = ConstantInt::get(Ty, 1);
2589 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2590 setOrigin(&
I, getOrigin(OtherArg));
2594 Constant *constOp0 = dyn_cast<Constant>(
I.getOperand(0));
2595 Constant *constOp1 = dyn_cast<Constant>(
I.getOperand(1));
2596 if (constOp0 && !constOp1)
2597 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2598 else if (constOp1 && !constOp0)
2599 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2614 insertShadowCheck(
I.getOperand(1), &
I);
2615 setShadow(&
I, getShadow(&
I, 0));
2616 setOrigin(&
I, getOrigin(&
I, 0));
2633 void handleEqualityComparison(
ICmpInst &
I) {
2637 Value *Sa = getShadow(
A);
2638 Value *Sb = getShadow(
B);
2664 setOriginForNaryOp(
I);
2706 void handleRelationalComparisonExact(
ICmpInst &
I) {
2710 Value *Sa = getShadow(
A);
2711 Value *Sb = getShadow(
B);
2722 bool IsSigned =
I.isSigned();
2724 getLowestPossibleValue(IRB,
A, Sa, IsSigned),
2725 getHighestPossibleValue(IRB,
B, Sb, IsSigned));
2727 getHighestPossibleValue(IRB,
A, Sa, IsSigned),
2728 getLowestPossibleValue(IRB,
B, Sb, IsSigned));
2731 setOriginForNaryOp(
I);
2738 void handleSignedRelationalComparison(
ICmpInst &
I) {
2742 if ((constOp = dyn_cast<Constant>(
I.getOperand(1)))) {
2743 op =
I.getOperand(0);
2744 pre =
I.getPredicate();
2745 }
else if ((constOp = dyn_cast<Constant>(
I.getOperand(0)))) {
2746 op =
I.getOperand(1);
2747 pre =
I.getSwappedPredicate();
2760 setShadow(&
I, Shadow);
2761 setOrigin(&
I, getOrigin(
op));
2772 if (
I.isEquality()) {
2773 handleEqualityComparison(
I);
2779 handleRelationalComparisonExact(
I);
2783 handleSignedRelationalComparison(
I);
2788 if ((isa<Constant>(
I.getOperand(0)) || isa<Constant>(
I.getOperand(1)))) {
2789 handleRelationalComparisonExact(
I);
2796 void visitFCmpInst(
FCmpInst &
I) { handleShadowOr(
I); }
2803 Value *S2 = getShadow(&
I, 1);
2808 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2809 setOriginForNaryOp(
I);
2820 Value *S0 = getShadow(&
I, 0);
2822 Value *S2 = getShadow(&
I, 2);
2827 I.getModule(),
I.getIntrinsicID(), S2Conv->
getType());
2829 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2830 setOriginForNaryOp(
I);
2844 getShadow(
I.getArgOperand(1));
2847 {I.getArgOperand(0), I.getArgOperand(1),
2848 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2849 I.eraseFromParent();
2867 getShadow(
I.getArgOperand(1));
2870 {I.getArgOperand(0), I.getArgOperand(1),
2871 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2872 I.eraseFromParent();
2880 {I.getArgOperand(0),
2881 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2882 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2883 I.eraseFromParent();
2886 void visitVAStartInst(
VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
2888 void visitVACopyInst(
VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
2897 Value *Shadow = getShadow(&
I, 1);
2898 Value *ShadowPtr, *OriginPtr;
2902 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2907 insertShadowCheck(
Addr, &
I);
2910 if (MS.TrackOrigins)
2923 Type *ShadowTy = getShadowTy(&
I);
2924 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2925 if (PropagateShadow) {
2929 std::tie(ShadowPtr, OriginPtr) =
2930 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2934 setShadow(&
I, getCleanShadow(&
I));
2938 insertShadowCheck(
Addr, &
I);
2940 if (MS.TrackOrigins) {
2941 if (PropagateShadow)
2942 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
2944 setOrigin(&
I, getCleanOrigin());
2957 if (!(
RetTy->isIntOrIntVectorTy() ||
RetTy->isFPOrFPVectorTy() ||
2958 RetTy->isX86_MMXTy()))
2961 unsigned NumArgOperands =
I.arg_size();
2962 for (
unsigned i = 0; i < NumArgOperands; ++i) {
2963 Type *Ty =
I.getArgOperand(i)->getType();
2969 ShadowAndOriginCombiner
SC(
this, IRB);
2970 for (
unsigned i = 0; i < NumArgOperands; ++i)
2971 SC.Add(
I.getArgOperand(i));
2988 unsigned NumArgOperands =
I.arg_size();
2989 if (NumArgOperands == 0)
2992 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
2993 I.getArgOperand(1)->getType()->isVectorTy() &&
2994 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
2996 return handleVectorStoreIntrinsic(
I);
2999 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3000 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3002 return handleVectorLoadIntrinsic(
I);
3005 if (
I.doesNotAccessMemory())
3006 if (maybeHandleSimpleNomemIntrinsic(
I))
3014 setShadow(&
I, getShadow(&
I, 0));
3015 setOrigin(&
I, getOrigin(&
I, 0));
3023 InstrumentLifetimeStart =
false;
3024 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3030 Type *OpType =
Op->getType();
3032 F.getParent(), Intrinsic::bswap,
ArrayRef(&OpType, 1));
3034 setOrigin(&
I, getOrigin(
Op));
3039 Value *Src =
I.getArgOperand(0);
3045 Constant *IsZeroPoison = cast<Constant>(
I.getOperand(1));
3048 BoolShadow = IRB.
CreateOr(BoolShadow, BoolZeroPoison,
"_mscz_bs");
3051 Value *OutputShadow =
3052 IRB.
CreateSExt(BoolShadow, getShadowTy(Src),
"_mscz_os");
3054 setShadow(&
I, OutputShadow);
3055 setOriginForNaryOp(
I);
3073 void handleVectorConvertIntrinsic(
IntrinsicInst &
I,
int NumUsedElements,
3074 bool HasRoundingMode =
false) {
3076 Value *CopyOp, *ConvertOp;
3078 assert((!HasRoundingMode ||
3079 isa<ConstantInt>(
I.getArgOperand(
I.arg_size() - 1))) &&
3080 "Invalid rounding mode");
3082 switch (
I.arg_size() - HasRoundingMode) {
3084 CopyOp =
I.getArgOperand(0);
3085 ConvertOp =
I.getArgOperand(1);
3088 ConvertOp =
I.getArgOperand(0);
3102 Value *ConvertShadow = getShadow(ConvertOp);
3103 Value *AggShadow =
nullptr;
3106 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3107 for (
int i = 1; i < NumUsedElements; ++i) {
3109 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3110 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3113 AggShadow = ConvertShadow;
3116 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &
I);
3123 Value *ResultShadow = getShadow(CopyOp);
3124 Type *EltTy = cast<VectorType>(ResultShadow->
getType())->getElementType();
3125 for (
int i = 0; i < NumUsedElements; ++i) {
3127 ResultShadow, ConstantInt::getNullValue(EltTy),
3130 setShadow(&
I, ResultShadow);
3131 setOrigin(&
I, getOrigin(CopyOp));
3133 setShadow(&
I, getCleanShadow(&
I));
3134 setOrigin(&
I, getCleanOrigin());
3142 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3145 return CreateShadowCast(IRB, S2,
T,
true);
3153 return CreateShadowCast(IRB, S2,
T,
true);
3170 void handleVectorShiftIntrinsic(
IntrinsicInst &
I,
bool Variable) {
3176 Value *S2 = getShadow(&
I, 1);
3177 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3178 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3179 Value *V1 =
I.getOperand(0);
3182 {IRB.CreateBitCast(S1, V1->getType()), V2});
3184 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3185 setOriginForNaryOp(
I);
3189 Type *getMMXVectorTy(
unsigned EltSizeInBits) {
3190 const unsigned X86_MMXSizeInBits = 64;
3191 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3192 "Illegal MMX vector element size");
3194 X86_MMXSizeInBits / EltSizeInBits);
3201 case Intrinsic::x86_sse2_packsswb_128:
3202 case Intrinsic::x86_sse2_packuswb_128:
3203 return Intrinsic::x86_sse2_packsswb_128;
3205 case Intrinsic::x86_sse2_packssdw_128:
3206 case Intrinsic::x86_sse41_packusdw:
3207 return Intrinsic::x86_sse2_packssdw_128;
3209 case Intrinsic::x86_avx2_packsswb:
3210 case Intrinsic::x86_avx2_packuswb:
3211 return Intrinsic::x86_avx2_packsswb;
3213 case Intrinsic::x86_avx2_packssdw:
3214 case Intrinsic::x86_avx2_packusdw:
3215 return Intrinsic::x86_avx2_packssdw;
3217 case Intrinsic::x86_mmx_packsswb:
3218 case Intrinsic::x86_mmx_packuswb:
3219 return Intrinsic::x86_mmx_packsswb;
3221 case Intrinsic::x86_mmx_packssdw:
3222 return Intrinsic::x86_mmx_packssdw;
3235 void handleVectorPackIntrinsic(
IntrinsicInst &
I,
unsigned EltSizeInBits = 0) {
3237 bool isX86_MMX =
I.getOperand(0)->getType()->isX86_MMXTy();
3240 Value *S2 = getShadow(&
I, 1);
3241 assert(isX86_MMX ||
S1->getType()->isVectorTy());
3246 Type *
T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) :
S1->
getType();
3262 F.getParent(), getSignedPackIntrinsic(
I.getIntrinsicID()));
3265 IRB.
CreateCall(ShadowFn, {S1_ext, S2_ext},
"_msprop_vector_pack");
3269 setOriginForNaryOp(
I);
3274 const unsigned SignificantBitsPerResultElement = 16;
3275 bool isX86_MMX =
I.getOperand(0)->getType()->isX86_MMXTy();
3277 unsigned ZeroBitsPerResultElement =
3281 auto *Shadow0 = getShadow(&
I, 0);
3282 auto *Shadow1 = getShadow(&
I, 1);
3287 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3290 setOriginForNaryOp(
I);
3295 unsigned EltSizeInBits = 0) {
3296 bool isX86_MMX =
I.getOperand(0)->getType()->isX86_MMXTy();
3297 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) :
I.
getType();
3299 auto *Shadow0 = getShadow(&
I, 0);
3300 auto *Shadow1 = getShadow(&
I, 1);
3307 setOriginForNaryOp(
I);
3315 Type *ResTy = getShadowTy(&
I);
3316 auto *Shadow0 = getShadow(&
I, 0);
3317 auto *Shadow1 = getShadow(&
I, 1);
3322 setOriginForNaryOp(
I);
3330 auto *Shadow0 = getShadow(&
I, 0);
3331 auto *Shadow1 = getShadow(&
I, 1);
3333 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
3335 setOriginForNaryOp(
I);
3344 setOrigin(&
I, getOrigin(&
I, 0));
3352 Value *OperandShadow = getShadow(&
I, 0);
3354 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
3362 setOrigin(&
I, getOrigin(&
I, 0));
3370 Value *OperandShadow = getShadow(&
I, 0);
3371 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
3379 setOrigin(&
I, getOrigin(&
I, 0));
3387 getShadowOriginPtr(
Addr, IRB, Ty,
Align(1),
true).first;
3392 insertShadowCheck(
Addr, &
I);
3403 Value *ShadowPtr, *OriginPtr;
3404 std::tie(ShadowPtr, OriginPtr) =
3405 getShadowOriginPtr(
Addr, IRB, Ty, Alignment,
false);
3408 insertShadowCheck(
Addr, &
I);
3411 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
3413 insertShadowCheck(Shadow, Origin, &
I);
3420 Value *PassThru =
I.getArgOperand(2);
3423 insertShadowCheck(
Ptr, &
I);
3424 insertShadowCheck(Mask, &
I);
3427 if (!PropagateShadow) {
3428 setShadow(&
I, getCleanShadow(&
I));
3429 setOrigin(&
I, getCleanOrigin());
3433 Type *ShadowTy = getShadowTy(&
I);
3434 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3435 auto [ShadowPtr, OriginPtr] =
3436 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy, {},
false);
3439 ShadowTy, ShadowPtr, Mask, getShadow(PassThru),
"_msmaskedexpload");
3441 setShadow(&
I, Shadow);
3444 setOrigin(&
I, getCleanOrigin());
3449 Value *Values =
I.getArgOperand(0);
3454 insertShadowCheck(
Ptr, &
I);
3455 insertShadowCheck(Mask, &
I);
3458 Value *Shadow = getShadow(Values);
3459 Type *ElementShadowTy =
3460 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3461 auto [ShadowPtr, OriginPtrs] =
3462 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy, {},
true);
3471 Value *Ptrs =
I.getArgOperand(0);
3472 const Align Alignment(
3473 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3475 Value *PassThru =
I.getArgOperand(3);
3477 Type *PtrsShadowTy = getShadowTy(Ptrs);
3479 insertShadowCheck(Mask, &
I);
3483 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3486 if (!PropagateShadow) {
3487 setShadow(&
I, getCleanShadow(&
I));
3488 setOrigin(&
I, getCleanOrigin());
3492 Type *ShadowTy = getShadowTy(&
I);
3493 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3494 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3495 Ptrs, IRB, ElementShadowTy, Alignment,
false);
3499 getShadow(PassThru),
"_msmaskedgather");
3501 setShadow(&
I, Shadow);
3504 setOrigin(&
I, getCleanOrigin());
3509 Value *Values =
I.getArgOperand(0);
3510 Value *Ptrs =
I.getArgOperand(1);
3511 const Align Alignment(
3512 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3515 Type *PtrsShadowTy = getShadowTy(Ptrs);
3517 insertShadowCheck(Mask, &
I);
3521 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3524 Value *Shadow = getShadow(Values);
3525 Type *ElementShadowTy =
3526 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3527 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3528 Ptrs, IRB, ElementShadowTy, Alignment,
true);
3537 Value *
V =
I.getArgOperand(0);
3539 const Align Alignment(
3540 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3542 Value *Shadow = getShadow(V);
3545 insertShadowCheck(
Ptr, &
I);
3546 insertShadowCheck(Mask, &
I);
3551 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3552 Ptr, IRB, Shadow->
getType(), Alignment,
true);
3556 if (!MS.TrackOrigins)
3559 auto &
DL =
F.getParent()->getDataLayout();
3560 paintOrigin(IRB, getOrigin(V), OriginPtr,
3568 const Align Alignment(
3569 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3571 Value *PassThru =
I.getArgOperand(3);
3574 insertShadowCheck(
Ptr, &
I);
3575 insertShadowCheck(Mask, &
I);
3578 if (!PropagateShadow) {
3579 setShadow(&
I, getCleanShadow(&
I));
3580 setOrigin(&
I, getCleanOrigin());
3584 Type *ShadowTy = getShadowTy(&
I);
3585 Value *ShadowPtr, *OriginPtr;
3586 std::tie(ShadowPtr, OriginPtr) =
3587 getShadowOriginPtr(
Ptr, IRB, ShadowTy, Alignment,
false);
3589 getShadow(PassThru),
"_msmaskedld"));
3591 if (!MS.TrackOrigins)
3598 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
3603 setOrigin(&
I, Origin);
3613 Type *ShadowTy = getShadowTy(&
I);
3616 Value *SMask = getShadow(&
I, 1);
3621 {getShadow(&I, 0), I.getOperand(1)});
3624 setOriginForNaryOp(
I);
3629 for (
unsigned X = OddElements ? 1 : 0;
X < Width;
X += 2) {
3646 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3647 assert(isa<ConstantInt>(
I.getArgOperand(2)) &&
3648 "pclmul 3rd operand must be a constant");
3649 unsigned Imm = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3651 getPclmulMask(Width, Imm & 0x01));
3653 getPclmulMask(Width, Imm & 0x10));
3654 ShadowAndOriginCombiner SOC(
this, IRB);
3655 SOC.Add(Shuf0, getOrigin(&
I, 0));
3656 SOC.Add(Shuf1, getOrigin(&
I, 1));
3664 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3666 Value *Second = getShadow(&
I, 1);
3669 Mask.push_back(Width);
3670 for (
unsigned i = 1; i < Width; i++)
3674 setShadow(&
I, Shadow);
3675 setOriginForNaryOp(
I);
3680 Value *Shadow0 = getShadow(&
I, 0);
3681 Value *Shadow1 = getShadow(&
I, 1);
3687 setShadow(&
I, Shadow);
3688 setOriginForNaryOp(
I);
3694 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3696 Value *Second = getShadow(&
I, 1);
3700 Mask.push_back(Width);
3701 for (
unsigned i = 1; i < Width; i++)
3705 setShadow(&
I, Shadow);
3706 setOriginForNaryOp(
I);
3713 assert(
I.getType()->isIntOrIntVectorTy());
3714 assert(
I.getArgOperand(0)->getType() ==
I.getType());
3718 setShadow(&
I, getShadow(&
I, 0));
3719 setOrigin(&
I, getOrigin(&
I, 0));
3724 Value *Shadow = getShadow(&
I, 0);
3725 setShadow(&
I, IRB.
CreateICmpNE(Shadow, getCleanShadow(Shadow)));
3726 setOrigin(&
I, getOrigin(&
I, 0));
3731 Value *Shadow0 = getShadow(&
I, 0);
3732 Value *Shadow1 = getShadow(&
I, 1);
3735 IRB.
CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
3741 setShadow(&
I, Shadow);
3742 setOriginForNaryOp(
I);
3746 switch (
I.getIntrinsicID()) {
3747 case Intrinsic::uadd_with_overflow:
3748 case Intrinsic::sadd_with_overflow:
3749 case Intrinsic::usub_with_overflow:
3750 case Intrinsic::ssub_with_overflow:
3751 case Intrinsic::umul_with_overflow:
3752 case Intrinsic::smul_with_overflow:
3753 handleArithmeticWithOverflow(
I);
3755 case Intrinsic::abs:
3756 handleAbsIntrinsic(
I);
3758 case Intrinsic::is_fpclass:
3761 case Intrinsic::lifetime_start:
3762 handleLifetimeStart(
I);
3764 case Intrinsic::launder_invariant_group:
3765 case Intrinsic::strip_invariant_group:
3766 handleInvariantGroup(
I);
3768 case Intrinsic::bswap:
3771 case Intrinsic::ctlz:
3772 case Intrinsic::cttz:
3773 handleCountZeroes(
I);
3775 case Intrinsic::masked_compressstore:
3776 handleMaskedCompressStore(
I);
3778 case Intrinsic::masked_expandload:
3779 handleMaskedExpandLoad(
I);
3781 case Intrinsic::masked_gather:
3782 handleMaskedGather(
I);
3784 case Intrinsic::masked_scatter:
3785 handleMaskedScatter(
I);
3787 case Intrinsic::masked_store:
3788 handleMaskedStore(
I);
3790 case Intrinsic::masked_load:
3791 handleMaskedLoad(
I);
3793 case Intrinsic::vector_reduce_and:
3794 handleVectorReduceAndIntrinsic(
I);
3796 case Intrinsic::vector_reduce_or:
3797 handleVectorReduceOrIntrinsic(
I);
3799 case Intrinsic::vector_reduce_add:
3800 case Intrinsic::vector_reduce_xor:
3801 case Intrinsic::vector_reduce_mul:
3802 handleVectorReduceIntrinsic(
I);
3804 case Intrinsic::x86_sse_stmxcsr:
3807 case Intrinsic::x86_sse_ldmxcsr:
3810 case Intrinsic::x86_avx512_vcvtsd2usi64:
3811 case Intrinsic::x86_avx512_vcvtsd2usi32:
3812 case Intrinsic::x86_avx512_vcvtss2usi64:
3813 case Intrinsic::x86_avx512_vcvtss2usi32:
3814 case Intrinsic::x86_avx512_cvttss2usi64:
3815 case Intrinsic::x86_avx512_cvttss2usi:
3816 case Intrinsic::x86_avx512_cvttsd2usi64:
3817 case Intrinsic::x86_avx512_cvttsd2usi:
3818 case Intrinsic::x86_avx512_cvtusi2ss:
3819 case Intrinsic::x86_avx512_cvtusi642sd:
3820 case Intrinsic::x86_avx512_cvtusi642ss:
3821 handleVectorConvertIntrinsic(
I, 1,
true);
3823 case Intrinsic::x86_sse2_cvtsd2si64:
3824 case Intrinsic::x86_sse2_cvtsd2si:
3825 case Intrinsic::x86_sse2_cvtsd2ss:
3826 case Intrinsic::x86_sse2_cvttsd2si64:
3827 case Intrinsic::x86_sse2_cvttsd2si:
3828 case Intrinsic::x86_sse_cvtss2si64:
3829 case Intrinsic::x86_sse_cvtss2si:
3830 case Intrinsic::x86_sse_cvttss2si64:
3831 case Intrinsic::x86_sse_cvttss2si:
3832 handleVectorConvertIntrinsic(
I, 1);
3834 case Intrinsic::x86_sse_cvtps2pi:
3835 case Intrinsic::x86_sse_cvttps2pi:
3836 handleVectorConvertIntrinsic(
I, 2);
3839 case Intrinsic::x86_avx512_psll_w_512:
3840 case Intrinsic::x86_avx512_psll_d_512:
3841 case Intrinsic::x86_avx512_psll_q_512:
3842 case Intrinsic::x86_avx512_pslli_w_512:
3843 case Intrinsic::x86_avx512_pslli_d_512:
3844 case Intrinsic::x86_avx512_pslli_q_512:
3845 case Intrinsic::x86_avx512_psrl_w_512:
3846 case Intrinsic::x86_avx512_psrl_d_512:
3847 case Intrinsic::x86_avx512_psrl_q_512:
3848 case Intrinsic::x86_avx512_psra_w_512:
3849 case Intrinsic::x86_avx512_psra_d_512:
3850 case Intrinsic::x86_avx512_psra_q_512:
3851 case Intrinsic::x86_avx512_psrli_w_512:
3852 case Intrinsic::x86_avx512_psrli_d_512:
3853 case Intrinsic::x86_avx512_psrli_q_512:
3854 case Intrinsic::x86_avx512_psrai_w_512:
3855 case Intrinsic::x86_avx512_psrai_d_512:
3856 case Intrinsic::x86_avx512_psrai_q_512:
3857 case Intrinsic::x86_avx512_psra_q_256:
3858 case Intrinsic::x86_avx512_psra_q_128:
3859 case Intrinsic::x86_avx512_psrai_q_256:
3860 case Intrinsic::x86_avx512_psrai_q_128:
3861 case Intrinsic::x86_avx2_psll_w:
3862 case Intrinsic::x86_avx2_psll_d:
3863 case Intrinsic::x86_avx2_psll_q:
3864 case Intrinsic::x86_avx2_pslli_w:
3865 case Intrinsic::x86_avx2_pslli_d:
3866 case Intrinsic::x86_avx2_pslli_q:
3867 case Intrinsic::x86_avx2_psrl_w:
3868 case Intrinsic::x86_avx2_psrl_d:
3869 case Intrinsic::x86_avx2_psrl_q:
3870 case Intrinsic::x86_avx2_psra_w:
3871 case Intrinsic::x86_avx2_psra_d:
3872 case Intrinsic::x86_avx2_psrli_w:
3873 case Intrinsic::x86_avx2_psrli_d:
3874 case Intrinsic::x86_avx2_psrli_q:
3875 case Intrinsic::x86_avx2_psrai_w:
3876 case Intrinsic::x86_avx2_psrai_d:
3877 case Intrinsic::x86_sse2_psll_w:
3878 case Intrinsic::x86_sse2_psll_d:
3879 case Intrinsic::x86_sse2_psll_q:
3880 case Intrinsic::x86_sse2_pslli_w:
3881 case Intrinsic::x86_sse2_pslli_d:
3882 case Intrinsic::x86_sse2_pslli_q:
3883 case Intrinsic::x86_sse2_psrl_w:
3884 case Intrinsic::x86_sse2_psrl_d:
3885 case Intrinsic::x86_sse2_psrl_q:
3886 case Intrinsic::x86_sse2_psra_w:
3887 case Intrinsic::x86_sse2_psra_d:
3888 case Intrinsic::x86_sse2_psrli_w:
3889 case Intrinsic::x86_sse2_psrli_d:
3890 case Intrinsic::x86_sse2_psrli_q:
3891 case Intrinsic::x86_sse2_psrai_w:
3892 case Intrinsic::x86_sse2_psrai_d:
3893 case Intrinsic::x86_mmx_psll_w:
3894 case Intrinsic::x86_mmx_psll_d:
3895 case Intrinsic::x86_mmx_psll_q:
3896 case Intrinsic::x86_mmx_pslli_w:
3897 case Intrinsic::x86_mmx_pslli_d:
3898 case Intrinsic::x86_mmx_pslli_q:
3899 case Intrinsic::x86_mmx_psrl_w:
3900 case Intrinsic::x86_mmx_psrl_d:
3901 case Intrinsic::x86_mmx_psrl_q:
3902 case Intrinsic::x86_mmx_psra_w:
3903 case Intrinsic::x86_mmx_psra_d:
3904 case Intrinsic::x86_mmx_psrli_w:
3905 case Intrinsic::x86_mmx_psrli_d:
3906 case Intrinsic::x86_mmx_psrli_q:
3907 case Intrinsic::x86_mmx_psrai_w:
3908 case Intrinsic::x86_mmx_psrai_d:
3909 handleVectorShiftIntrinsic(
I,
false);
3911 case Intrinsic::x86_avx2_psllv_d:
3912 case Intrinsic::x86_avx2_psllv_d_256:
3913 case Intrinsic::x86_avx512_psllv_d_512:
3914 case Intrinsic::x86_avx2_psllv_q:
3915 case Intrinsic::x86_avx2_psllv_q_256:
3916 case Intrinsic::x86_avx512_psllv_q_512:
3917 case Intrinsic::x86_avx2_psrlv_d:
3918 case Intrinsic::x86_avx2_psrlv_d_256:
3919 case Intrinsic::x86_avx512_psrlv_d_512:
3920 case Intrinsic::x86_avx2_psrlv_q:
3921 case Intrinsic::x86_avx2_psrlv_q_256:
3922 case Intrinsic::x86_avx512_psrlv_q_512:
3923 case Intrinsic::x86_avx2_psrav_d:
3924 case Intrinsic::x86_avx2_psrav_d_256:
3925 case Intrinsic::x86_avx512_psrav_d_512:
3926 case Intrinsic::x86_avx512_psrav_q_128:
3927 case Intrinsic::x86_avx512_psrav_q_256:
3928 case Intrinsic::x86_avx512_psrav_q_512:
3929 handleVectorShiftIntrinsic(
I,
true);
3932 case Intrinsic::x86_sse2_packsswb_128:
3933 case Intrinsic::x86_sse2_packssdw_128:
3934 case Intrinsic::x86_sse2_packuswb_128:
3935 case Intrinsic::x86_sse41_packusdw:
3936 case Intrinsic::x86_avx2_packsswb:
3937 case Intrinsic::x86_avx2_packssdw:
3938 case Intrinsic::x86_avx2_packuswb:
3939 case Intrinsic::x86_avx2_packusdw:
3940 handleVectorPackIntrinsic(
I);
3943 case Intrinsic::x86_mmx_packsswb:
3944 case Intrinsic::x86_mmx_packuswb:
3945 handleVectorPackIntrinsic(
I, 16);
3948 case Intrinsic::x86_mmx_packssdw:
3949 handleVectorPackIntrinsic(
I, 32);
3952 case Intrinsic::x86_mmx_psad_bw:
3953 case Intrinsic::x86_sse2_psad_bw:
3954 case Intrinsic::x86_avx2_psad_bw:
3955 handleVectorSadIntrinsic(
I);
3958 case Intrinsic::x86_sse2_pmadd_wd:
3959 case Intrinsic::x86_avx2_pmadd_wd:
3960 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
3961 case Intrinsic::x86_avx2_pmadd_ub_sw:
3962 handleVectorPmaddIntrinsic(
I);
3965 case Intrinsic::x86_ssse3_pmadd_ub_sw:
3966 handleVectorPmaddIntrinsic(
I, 8);
3969 case Intrinsic::x86_mmx_pmadd_wd:
3970 handleVectorPmaddIntrinsic(
I, 16);
3973 case Intrinsic::x86_sse_cmp_ss:
3974 case Intrinsic::x86_sse2_cmp_sd:
3975 case Intrinsic::x86_sse_comieq_ss:
3976 case Intrinsic::x86_sse_comilt_ss:
3977 case Intrinsic::x86_sse_comile_ss:
3978 case Intrinsic::x86_sse_comigt_ss:
3979 case Intrinsic::x86_sse_comige_ss:
3980 case Intrinsic::x86_sse_comineq_ss:
3981 case Intrinsic::x86_sse_ucomieq_ss:
3982 case Intrinsic::x86_sse_ucomilt_ss:
3983 case Intrinsic::x86_sse_ucomile_ss:
3984 case Intrinsic::x86_sse_ucomigt_ss:
3985 case Intrinsic::x86_sse_ucomige_ss:
3986 case Intrinsic::x86_sse_ucomineq_ss:
3987 case Intrinsic::x86_sse2_comieq_sd:
3988 case Intrinsic::x86_sse2_comilt_sd:
3989 case Intrinsic::x86_sse2_comile_sd:
3990 case Intrinsic::x86_sse2_comigt_sd:
3991 case Intrinsic::x86_sse2_comige_sd:
3992 case Intrinsic::x86_sse2_comineq_sd:
3993 case Intrinsic::x86_sse2_ucomieq_sd:
3994 case Intrinsic::x86_sse2_ucomilt_sd:
3995 case Intrinsic::x86_sse2_ucomile_sd:
3996 case Intrinsic::x86_sse2_ucomigt_sd:
3997 case Intrinsic::x86_sse2_ucomige_sd:
3998 case Intrinsic::x86_sse2_ucomineq_sd:
3999 handleVectorCompareScalarIntrinsic(
I);
4002 case Intrinsic::x86_avx_cmp_pd_256:
4003 case Intrinsic::x86_avx_cmp_ps_256:
4004 case Intrinsic::x86_sse2_cmp_pd:
4005 case Intrinsic::x86_sse_cmp_ps:
4006 handleVectorComparePackedIntrinsic(
I);
4009 case Intrinsic::x86_bmi_bextr_32:
4010 case Intrinsic::x86_bmi_bextr_64:
4011 case Intrinsic::x86_bmi_bzhi_32:
4012 case Intrinsic::x86_bmi_bzhi_64:
4013 case Intrinsic::x86_bmi_pdep_32:
4014 case Intrinsic::x86_bmi_pdep_64:
4015 case Intrinsic::x86_bmi_pext_32:
4016 case Intrinsic::x86_bmi_pext_64:
4017 handleBmiIntrinsic(
I);
4020 case Intrinsic::x86_pclmulqdq:
4021 case Intrinsic::x86_pclmulqdq_256:
4022 case Intrinsic::x86_pclmulqdq_512:
4023 handlePclmulIntrinsic(
I);
4026 case Intrinsic::x86_sse41_round_sd:
4027 case Intrinsic::x86_sse41_round_ss:
4028 handleUnarySdSsIntrinsic(
I);
4030 case Intrinsic::x86_sse2_max_sd:
4031 case Intrinsic::x86_sse_max_ss:
4032 case Intrinsic::x86_sse2_min_sd:
4033 case Intrinsic::x86_sse_min_ss:
4034 handleBinarySdSsIntrinsic(
I);
4037 case Intrinsic::x86_avx_vtestc_pd:
4038 case Intrinsic::x86_avx_vtestc_pd_256:
4039 case Intrinsic::x86_avx_vtestc_ps:
4040 case Intrinsic::x86_avx_vtestc_ps_256:
4041 case Intrinsic::x86_avx_vtestnzc_pd:
4042 case Intrinsic::x86_avx_vtestnzc_pd_256:
4043 case Intrinsic::x86_avx_vtestnzc_ps:
4044 case Intrinsic::x86_avx_vtestnzc_ps_256:
4045 case Intrinsic::x86_avx_vtestz_pd:
4046 case Intrinsic::x86_avx_vtestz_pd_256:
4047 case Intrinsic::x86_avx_vtestz_ps:
4048 case Intrinsic::x86_avx_vtestz_ps_256:
4049 case Intrinsic::x86_avx_ptestc_256:
4050 case Intrinsic::x86_avx_ptestnzc_256:
4051 case Intrinsic::x86_avx_ptestz_256:
4052 case Intrinsic::x86_sse41_ptestc:
4053 case Intrinsic::x86_sse41_ptestnzc:
4054 case Intrinsic::x86_sse41_ptestz:
4055 handleVtestIntrinsic(
I);
4058 case Intrinsic::fshl:
4059 case Intrinsic::fshr:
4060 handleFunnelShift(
I);
4063 case Intrinsic::is_constant:
4065 setShadow(&
I, getCleanShadow(&
I));
4066 setOrigin(&
I, getCleanOrigin());
4070 if (!handleUnknownIntrinsic(
I))
4071 visitInstruction(
I);
4076 void visitLibAtomicLoad(
CallBase &CB) {
4078 assert(isa<CallInst>(CB));
4087 Value *NewOrdering =
4091 NextNodeIRBuilder NextIRB(&CB);
4092 Value *SrcShadowPtr, *SrcOriginPtr;
4093 std::tie(SrcShadowPtr, SrcOriginPtr) =
4094 getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
4096 Value *DstShadowPtr =
4097 getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
4101 NextIRB.CreateMemCpy(DstShadowPtr,
Align(1), SrcShadowPtr,
Align(1),
Size);
4102 if (MS.TrackOrigins) {
4103 Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
4105 Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
4106 NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
4110 void visitLibAtomicStore(
CallBase &CB) {
4117 Value *NewOrdering =
4121 Value *DstShadowPtr =
4139 visitAsmInstruction(CB);
4141 visitInstruction(CB);
4150 case LibFunc_atomic_load:
4151 if (!isa<CallInst>(CB)) {
4152 llvm::errs() <<
"MSAN -- cannot instrument invoke of libatomic load."
4156 visitLibAtomicLoad(CB);
4158 case LibFunc_atomic_store:
4159 visitLibAtomicStore(CB);
4166 if (
auto *Call = dyn_cast<CallInst>(&CB)) {
4167 assert(!isa<IntrinsicInst>(Call) &&
"intrinsics are handled elsewhere");
4175 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
4177 Call->removeFnAttrs(
B);
4179 Func->removeFnAttrs(
B);
4185 bool MayCheckCall = MS.EagerChecks;
4189 MayCheckCall &= !
Func->getName().starts_with(
"__sanitizer_unaligned_");
4192 unsigned ArgOffset = 0;
4195 if (!
A->getType()->isSized()) {
4196 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is not sized: " << CB <<
"\n");
4204 bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
4207 insertShadowCheck(
A, &CB);
4208 Size =
DL.getTypeAllocSize(
A->getType());
4214 Value *ArgShadow = getShadow(
A);
4215 Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
4217 <<
" Shadow: " << *ArgShadow <<
"\n");
4221 assert(
A->getType()->isPointerTy() &&
4222 "ByVal argument is not a pointer!");
4230 Value *AShadowPtr, *AOriginPtr;
4231 std::tie(AShadowPtr, AOriginPtr) =
4232 getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(), Alignment,
4234 if (!PropagateShadow) {
4241 if (MS.TrackOrigins) {
4242 Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
4256 Size =
DL.getTypeAllocSize(
A->getType());
4261 Constant *Cst = dyn_cast<Constant>(ArgShadow);
4262 if (MS.TrackOrigins && !(Cst && Cst->
isNullValue())) {
4264 getOriginPtrForArgument(IRB, ArgOffset));
4268 assert(Store !=
nullptr);
4277 if (FT->isVarArg()) {
4278 VAHelper->visitCallBase(CB, IRB);
4285 if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
4288 if (MayCheckCall && CB.
hasRetAttr(Attribute::NoUndef)) {
4289 setShadow(&CB, getCleanShadow(&CB));
4290 setOrigin(&CB, getCleanOrigin());
4296 Value *
Base = getShadowPtrForRetval(IRBBefore);
4297 IRBBefore.CreateAlignedStore(getCleanShadow(&CB),
Base,
4300 if (isa<CallInst>(CB)) {
4304 BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
4309 setShadow(&CB, getCleanShadow(&CB));
4310 setOrigin(&CB, getCleanOrigin());
4317 "Could not find insertion point for retval shadow load");
4320 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
4321 getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
4323 setShadow(&CB, RetvalShadow);
4324 if (MS.TrackOrigins)
4325 setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
4326 getOriginPtrForRetval()));
4330 if (
auto *
I = dyn_cast<BitCastInst>(RetVal)) {
4331 RetVal =
I->getOperand(0);
4333 if (
auto *
I = dyn_cast<CallInst>(RetVal)) {
4334 return I->isMustTailCall();
4341 Value *RetVal =
I.getReturnValue();
4347 Value *ShadowPtr = getShadowPtrForRetval(IRB);
4348 bool HasNoUndef =
F.hasRetAttribute(Attribute::NoUndef);
4349 bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
4352 bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (
F.getName() ==
"main");
4354 Value *Shadow = getShadow(RetVal);
4355 bool StoreOrigin =
true;
4357 insertShadowCheck(RetVal, &
I);
4358 Shadow = getCleanShadow(RetVal);
4359 StoreOrigin =
false;
4366 if (MS.TrackOrigins && StoreOrigin)
4367 IRB.
CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
4373 if (!PropagateShadow) {
4374 setShadow(&
I, getCleanShadow(&
I));
4375 setOrigin(&
I, getCleanOrigin());
4379 ShadowPHINodes.push_back(&
I);
4380 setShadow(&
I, IRB.
CreatePHI(getShadowTy(&
I),
I.getNumIncomingValues(),
4382 if (MS.TrackOrigins)
4384 &
I, IRB.
CreatePHI(MS.OriginTy,
I.getNumIncomingValues(),
"_msphi_o"));
4401 IRB.
CreateCall(MS.MsanPoisonStackFn, {&I, Len});
4403 Value *ShadowBase, *OriginBase;
4404 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
4411 if (PoisonStack && MS.TrackOrigins) {
4412 Value *Idptr = getLocalVarIdptr(
I);
4414 Value *Descr = getLocalVarDescription(
I);
4415 IRB.
CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
4416 {&I, Len, Idptr, Descr});
4418 IRB.
CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
4424 Value *Descr = getLocalVarDescription(
I);
4426 IRB.
CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
4428 IRB.
CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
4435 NextNodeIRBuilder IRB(InsPoint);
4439 if (
I.isArrayAllocation())
4443 if (MS.CompileKernel)
4444 poisonAllocaKmsan(
I, IRB, Len);
4446 poisonAllocaUserspace(
I, IRB, Len);
4450 setShadow(&
I, getCleanShadow(&
I));
4451 setOrigin(&
I, getCleanOrigin());
4463 Value *Sb = getShadow(
B);
4464 Value *Sc = getShadow(
C);
4465 Value *Sd = getShadow(
D);
4470 if (
I.getType()->isAggregateType()) {
4474 Sa1 = getPoisonedShadow(getShadowTy(
I.getType()));
4482 C = CreateAppToShadowCast(IRB,
C);
4483 D = CreateAppToShadowCast(IRB,
D);
4490 if (MS.TrackOrigins) {
4493 if (
B->getType()->isVectorTy()) {
4494 B = convertToBool(
B, IRB);
4495 Sb = convertToBool(Sb, IRB);
4502 getOrigin(
I.getFalseValue()))));
4509 setShadow(&
I, getCleanShadow(&
I));
4510 setOrigin(&
I, getCleanOrigin());
4514 setShadow(&
I, getCleanShadow(&
I));
4515 setOrigin(&
I, getCleanOrigin());
4519 setShadow(&
I, getCleanShadow(&
I));
4520 setOrigin(&
I, getCleanOrigin());
4527 Value *Agg =
I.getAggregateOperand();
4529 Value *AggShadow = getShadow(Agg);
4533 setShadow(&
I, ResShadow);
4534 setOriginForNaryOp(
I);
4540 Value *AggShadow = getShadow(
I.getAggregateOperand());
4541 Value *InsShadow = getShadow(
I.getInsertedValueOperand());
4547 setOriginForNaryOp(
I);
4551 if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
4552 errs() <<
"ZZZ call " << CI->getCalledFunction()->getName() <<
"\n";
4554 errs() <<
"ZZZ " <<
I.getOpcodeName() <<
"\n";
4556 errs() <<
"QQQ " <<
I <<
"\n";
4583 insertShadowCheck(Operand, &
I);
4590 auto Size =
DL.getTypeStoreSize(ElemTy);
4592 if (MS.CompileKernel) {
4593 IRB.
CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
4599 auto [ShadowPtr,
_] =
4600 getShadowOriginPtrUserspace(Operand, IRB, IRB.
getInt8Ty(),
Align(1));
4611 int NumRetOutputs = 0;
4613 Type *
RetTy = cast<Value>(CB)->getType();
4614 if (!
RetTy->isVoidTy()) {
4616 auto *
ST = dyn_cast<StructType>(
RetTy);
4618 NumRetOutputs =
ST->getNumElements();
4624 switch (
Info.Type) {
4632 return NumOutputs - NumRetOutputs;
4655 int OutputArgs = getNumOutputArgs(IA, CB);
4661 for (
int i = OutputArgs; i < NumOperands; i++) {
4669 for (
int i = 0; i < OutputArgs; i++) {
4675 setShadow(&
I, getCleanShadow(&
I));
4676 setOrigin(&
I, getCleanOrigin());
4681 setShadow(&
I, getCleanShadow(&
I));
4682 setOrigin(&
I, getCleanOrigin());
4690 for (
size_t i = 0, n =
I.getNumOperands(); i < n; i++) {
4691 Value *Operand =
I.getOperand(i);
4693 insertShadowCheck(Operand, &
I);
4695 setShadow(&
I, getCleanShadow(&
I));
4696 setOrigin(&
I, getCleanOrigin());
4700struct VarArgHelperBase :
public VarArgHelper {
4702 MemorySanitizer &MS;
4703 MemorySanitizerVisitor &MSV;
4705 const unsigned VAListTagSize;
// Construct the common VarArgHelper state.
// \param F              Function being instrumented.
// \param MS             Shared MemorySanitizer pass state.
// \param MSV            The per-function instrumentation visitor.
// \param VAListTagSize  Size in bytes of the ABI's va_list tag object
//                       (used when (un)poisoning the tag's shadow).
VarArgHelperBase(Function &F, MemorySanitizer &MS,
                 MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
    : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
4713 return IRB.
CreateAdd(
Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4718 unsigned ArgOffset) {
4727 unsigned ArgOffset,
unsigned ArgSize) {
4731 return getShadowPtrForVAArgument(Ty, IRB, ArgOffset);
4746 unsigned BaseOffset) {
4755 TailSize,
Align(8));
4760 Value *VAListTag =
I.getArgOperand(0);
4762 auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
4763 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
4766 VAListTagSize, Alignment,
false);
4773 unpoisonVAListTagForInst(
I);
4779 unpoisonVAListTagForInst(
I);
4784struct VarArgAMD64Helper :
public VarArgHelperBase {
4787 static const unsigned AMD64GpEndOffset = 48;
4788 static const unsigned AMD64FpEndOffsetSSE = 176;
4790 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
4792 unsigned AMD64FpEndOffset;
4795 Value *VAArgOverflowSize =
nullptr;
4797 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4799 VarArgAMD64Helper(
Function &
F, MemorySanitizer &MS,
4800 MemorySanitizerVisitor &MSV)
4801 : VarArgHelperBase(
F, MS, MSV, 24) {
4802 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
4803 for (
const auto &Attr :
F.getAttributes().getFnAttrs()) {
4804 if (Attr.isStringAttribute() &&
4805 (Attr.getKindAsString() ==
"target-features")) {
4806 if (Attr.getValueAsString().contains(
"-sse"))
4807 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
4813 ArgKind classifyArgument(
Value *arg) {
4816 if (
T->isX86_FP80Ty())
4818 if (
T->isFPOrFPVectorTy() ||
T->isX86_MMXTy())
4819 return AK_FloatingPoint;
4820 if (
T->isIntegerTy() &&
T->getPrimitiveSizeInBits() <= 64)
4821 return AK_GeneralPurpose;
4822 if (
T->isPointerTy())
4823 return AK_GeneralPurpose;
4836 unsigned GpOffset = 0;
4837 unsigned FpOffset = AMD64GpEndOffset;
4838 unsigned OverflowOffset = AMD64FpEndOffset;
4843 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
4850 assert(
A->getType()->isPointerTy());
4852 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
4854 unsigned BaseOffset = OverflowOffset;
4856 getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
4857 Value *OriginBase =
nullptr;
4858 if (MS.TrackOrigins)
4859 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
4860 OverflowOffset += AlignedSize;
4863 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
4867 Value *ShadowPtr, *OriginPtr;
4868 std::tie(ShadowPtr, OriginPtr) =
4873 if (MS.TrackOrigins)
4877 ArgKind AK = classifyArgument(
A);
4878 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
4880 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
4882 Value *ShadowBase, *OriginBase =
nullptr;
4884 case AK_GeneralPurpose:
4885 ShadowBase = getShadowPtrForVAArgument(
A->getType(), IRB, GpOffset);
4886 if (MS.TrackOrigins)
4887 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
4891 case AK_FloatingPoint:
4892 ShadowBase = getShadowPtrForVAArgument(
A->getType(), IRB, FpOffset);
4893 if (MS.TrackOrigins)
4894 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
4901 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
4903 unsigned BaseOffset = OverflowOffset;
4905 getShadowPtrForVAArgument(
A->getType(), IRB, OverflowOffset);
4906 if (MS.TrackOrigins) {
4907 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
4909 OverflowOffset += AlignedSize;
4912 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
4921 Value *Shadow = MSV.getShadow(
A);
4923 if (MS.TrackOrigins) {
4924 Value *Origin = MSV.getOrigin(
A);
4926 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
4932 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
4933 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4936 void finalizeInstrumentation()
override {
4937 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4938 "finalizeInstrumentation called twice");
4939 if (!VAStartInstrumentationList.
empty()) {
4946 ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
4953 Intrinsic::umin, CopySize,
4957 if (MS.TrackOrigins) {
4967 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
4968 CallInst *OrigInst = VAStartInstrumentationList[i];
4969 NextNodeIRBuilder IRB(OrigInst);
4972 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
4975 ConstantInt::get(MS.IntptrTy, 16)),
4976 PointerType::get(RegSaveAreaPtrTy, 0));
4977 Value *RegSaveAreaPtr =
4978 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4979 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4981 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4982 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
4984 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4986 if (MS.TrackOrigins)
4987 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4988 Alignment, AMD64FpEndOffset);
4989 Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C);
4992 ConstantInt::get(MS.IntptrTy, 8)),
4993 PointerType::get(OverflowArgAreaPtrTy, 0));
4994 Value *OverflowArgAreaPtr =
4995 IRB.
CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4996 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4997 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4998 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
5002 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
5004 if (MS.TrackOrigins) {
5007 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
5016struct VarArgMIPS64Helper :
public VarArgHelperBase {
5018 Value *VAArgSize =
nullptr;
5020 VarArgMIPS64Helper(
Function &
F, MemorySanitizer &MS,
5021 MemorySanitizerVisitor &MSV)
5022 : VarArgHelperBase(
F, MS, MSV, 8) {}
5025 unsigned VAArgOffset = 0;
5029 Triple TargetTriple(
F.getParent()->getTargetTriple());
5031 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
5036 VAArgOffset += (8 - ArgSize);
5038 Base = getShadowPtrForVAArgument(
A->getType(), IRB, VAArgOffset, ArgSize);
5039 VAArgOffset += ArgSize;
5040 VAArgOffset =
alignTo(VAArgOffset, 8);
5049 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5052 void finalizeInstrumentation()
override {
5053 assert(!VAArgSize && !VAArgTLSCopy &&
5054 "finalizeInstrumentation called twice");
5058 IRB.
CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
5060 if (!VAStartInstrumentationList.
empty()) {
5069 Intrinsic::umin, CopySize,
5077 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
5078 CallInst *OrigInst = VAStartInstrumentationList[i];
5079 NextNodeIRBuilder IRB(OrigInst);
5081 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
5082 Value *RegSaveAreaPtrPtr =
5084 PointerType::get(RegSaveAreaPtrTy, 0));
5085 Value *RegSaveAreaPtr =
5086 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5087 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5089 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5090 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5092 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5099struct VarArgAArch64Helper :
public VarArgHelperBase {
5100 static const unsigned kAArch64GrArgSize = 64;
5101 static const unsigned kAArch64VrArgSize = 128;
5103 static const unsigned AArch64GrBegOffset = 0;
5104 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
5106 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
5107 static const unsigned AArch64VrEndOffset =
5108 AArch64VrBegOffset + kAArch64VrArgSize;
5109 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
5112 Value *VAArgOverflowSize =
nullptr;
5114 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5116 VarArgAArch64Helper(
Function &
F, MemorySanitizer &MS,
5117 MemorySanitizerVisitor &MSV)
5118 : VarArgHelperBase(
F, MS, MSV, 32) {}
5121 std::pair<ArgKind, uint64_t> classifyArgument(
Type *
T) {
5122 if (
T->isIntOrPtrTy() &&
T->getPrimitiveSizeInBits() <= 64)
5123 return {AK_GeneralPurpose, 1};
5124 if (
T->isFloatingPointTy() &&
T->getPrimitiveSizeInBits() <= 128)
5125 return {AK_FloatingPoint, 1};
5127 if (
T->isArrayTy()) {
5128 auto R = classifyArgument(
T->getArrayElementType());
5129 R.second *=
T->getScalarType()->getArrayNumElements();
5134 auto R = classifyArgument(FV->getScalarType());
5135 R.second *= FV->getNumElements();
5140 return {AK_Memory, 0};
5153 unsigned GrOffset = AArch64GrBegOffset;
5154 unsigned VrOffset = AArch64VrBegOffset;
5155 unsigned OverflowOffset = AArch64VAEndOffset;
5160 auto [AK, RegNum] = classifyArgument(
A->getType());
5161 if (AK == AK_GeneralPurpose &&
5162 (GrOffset + RegNum * 8) > AArch64GrEndOffset)
5164 if (AK == AK_FloatingPoint &&
5165 (VrOffset + RegNum * 16) > AArch64VrEndOffset)
5169 case AK_GeneralPurpose:
5170 Base = getShadowPtrForVAArgument(
A->getType(), IRB, GrOffset);
5171 GrOffset += 8 * RegNum;
5173 case AK_FloatingPoint:
5174 Base = getShadowPtrForVAArgument(
A->getType(), IRB, VrOffset);
5175 VrOffset += 16 * RegNum;
5182 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
5184 unsigned BaseOffset = OverflowOffset;
5185 Base = getShadowPtrForVAArgument(
A->getType(), IRB, BaseOffset);
5186 OverflowOffset += AlignedSize;
5189 CleanUnusedTLS(IRB,
Base, BaseOffset);
5201 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
5202 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5209 ConstantInt::get(MS.IntptrTy, offset)),
5210 PointerType::get(*MS.C, 0));
5218 ConstantInt::get(MS.IntptrTy, offset)),
5219 PointerType::get(*MS.C, 0));
5221 return IRB.
CreateSExt(SaveArea32, MS.IntptrTy);
5224 void finalizeInstrumentation()
override {
5225 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5226 "finalizeInstrumentation called twice");
5227 if (!VAStartInstrumentationList.
empty()) {
5234 ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
5241 Intrinsic::umin, CopySize,
5247 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
5248 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
5252 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
5253 CallInst *OrigInst = VAStartInstrumentationList[i];
5254 NextNodeIRBuilder IRB(OrigInst);
5273 Value *StackSaveAreaPtr =
5274 IRB.
CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
5277 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
5278 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
5281 IRB.
CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
5284 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
5285 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
5288 IRB.
CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
5294 Value *GrRegSaveAreaShadowPtrOff =
5295 IRB.
CreateAdd(GrArgSize, GrOffSaveArea);
5297 Value *GrRegSaveAreaShadowPtr =
5298 MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5304 Value *GrCopySize = IRB.
CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
5310 Value *VrRegSaveAreaShadowPtrOff =
5311 IRB.
CreateAdd(VrArgSize, VrOffSaveArea);
5313 Value *VrRegSaveAreaShadowPtr =
5314 MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5321 VrRegSaveAreaShadowPtrOff);
5322 Value *VrCopySize = IRB.
CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
5328 Value *StackSaveAreaShadowPtr =
5329 MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5334 VAArgTLSCopy, IRB.
getInt32(AArch64VAEndOffset));
5337 Align(16), VAArgOverflowSize);
5343struct VarArgPowerPC64Helper :
public VarArgHelperBase {
5345 Value *VAArgSize =
nullptr;
5347 VarArgPowerPC64Helper(
Function &
F, MemorySanitizer &MS,
5348 MemorySanitizerVisitor &MSV)
5349 : VarArgHelperBase(
F, MS, MSV, 8) {}
5359 Triple TargetTriple(
F.getParent()->getTargetTriple());
5367 unsigned VAArgOffset = VAArgBase;
5371 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
5373 assert(
A->getType()->isPointerTy());
5375 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
5378 ArgAlign =
Align(8);
5379 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
5381 Value *
Base = getShadowPtrForVAArgument(
5382 RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
5384 Value *AShadowPtr, *AOriginPtr;
5385 std::tie(AShadowPtr, AOriginPtr) =
5386 MSV.getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(),
5396 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
5398 if (
A->getType()->isArrayTy()) {
5401 Type *ElementTy =
A->getType()->getArrayElementType();
5403 ArgAlign =
Align(
DL.getTypeAllocSize(ElementTy));
5404 }
else if (
A->getType()->isVectorTy()) {
5406 ArgAlign =
Align(ArgSize);
5409 ArgAlign =
Align(8);
5410 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
5411 if (
DL.isBigEndian()) {
5415 VAArgOffset += (8 - ArgSize);
5418 Base = getShadowPtrForVAArgument(
A->getType(), IRB,
5419 VAArgOffset - VAArgBase, ArgSize);
5423 VAArgOffset += ArgSize;
5427 VAArgBase = VAArgOffset;
5431 ConstantInt::get(IRB.
getInt64Ty(), VAArgOffset - VAArgBase);
5434 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5437 void finalizeInstrumentation()
override {
5438 assert(!VAArgSize && !VAArgTLSCopy &&
5439 "finalizeInstrumentation called twice");
5443 IRB.
CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
5445 if (!VAStartInstrumentationList.
empty()) {
5455 Intrinsic::umin, CopySize,
5463 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
5464 CallInst *OrigInst = VAStartInstrumentationList[i];
5465 NextNodeIRBuilder IRB(OrigInst);
5467 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
5468 Value *RegSaveAreaPtrPtr =
5470 PointerType::get(RegSaveAreaPtrTy, 0));
5471 Value *RegSaveAreaPtr =
5472 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5473 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5475 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5476 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5478 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5485struct VarArgSystemZHelper :
public VarArgHelperBase {
5486 static const unsigned SystemZGpOffset = 16;
5487 static const unsigned SystemZGpEndOffset = 56;
5488 static const unsigned SystemZFpOffset = 128;
5489 static const unsigned SystemZFpEndOffset = 160;
5490 static const unsigned SystemZMaxVrArgs = 8;
5491 static const unsigned SystemZRegSaveAreaSize = 160;
5492 static const unsigned SystemZOverflowOffset = 160;
5493 static const unsigned SystemZVAListTagSize = 32;
5494 static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
5495 static const unsigned SystemZRegSaveAreaPtrOffset = 24;
5497 bool IsSoftFloatABI;
5500 Value *VAArgOverflowSize =
nullptr;
5502 enum class ArgKind {
5510 enum class ShadowExtension {
None,
Zero, Sign };
5512 VarArgSystemZHelper(
Function &
F, MemorySanitizer &MS,
5513 MemorySanitizerVisitor &MSV)
5514 : VarArgHelperBase(
F, MS, MSV, SystemZVAListTagSize),
5515 IsSoftFloatABI(
F.getFnAttribute(
"use-soft-float").getValueAsBool()) {}
5517 ArgKind classifyArgument(
Type *
T) {
5524 if (
T->isIntegerTy(128) ||
T->isFP128Ty())
5525 return ArgKind::Indirect;
5526 if (
T->isFloatingPointTy())
5527 return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
5528 if (
T->isIntegerTy() ||
T->isPointerTy())
5529 return ArgKind::GeneralPurpose;
5530 if (
T->isVectorTy())
5531 return ArgKind::Vector;
5532 return ArgKind::Memory;
5535 ShadowExtension getShadowExtension(
const CallBase &CB,
unsigned ArgNo) {
5545 return ShadowExtension::Zero;
5549 return ShadowExtension::Sign;
5551 return ShadowExtension::None;
5555 unsigned GpOffset = SystemZGpOffset;
5556 unsigned FpOffset = SystemZFpOffset;
5557 unsigned VrIndex = 0;
5558 unsigned OverflowOffset = SystemZOverflowOffset;
5565 ArgKind AK = classifyArgument(
T);
5566 if (AK == ArgKind::Indirect) {
5567 T = PointerType::get(
T, 0);
5568 AK = ArgKind::GeneralPurpose;
5570 if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
5571 AK = ArgKind::Memory;
5572 if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
5573 AK = ArgKind::Memory;
5574 if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
5575 AK = ArgKind::Memory;
5576 Value *ShadowBase =
nullptr;
5577 Value *OriginBase =
nullptr;
5578 ShadowExtension SE = ShadowExtension::None;
5580 case ArgKind::GeneralPurpose: {
5585 SE = getShadowExtension(CB, ArgNo);
5587 if (SE == ShadowExtension::None) {
5589 assert(ArgAllocSize <= ArgSize);
5590 GapSize = ArgSize - ArgAllocSize;
5592 ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
5593 if (MS.TrackOrigins)
5594 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
5596 GpOffset += ArgSize;
5602 case ArgKind::FloatingPoint: {
5611 ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
5612 if (MS.TrackOrigins)
5613 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5615 FpOffset += ArgSize;
5621 case ArgKind::Vector: {
5628 case ArgKind::Memory: {
5636 SE = getShadowExtension(CB, ArgNo);
5638 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
5640 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
5641 if (MS.TrackOrigins)
5643 getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
5644 OverflowOffset += ArgSize;
5651 case ArgKind::Indirect:
5654 if (ShadowBase ==
nullptr)
5656 Value *Shadow = MSV.getShadow(
A);
5657 if (SE != ShadowExtension::None)
5658 Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.
getInt64Ty(),
5659 SE == ShadowExtension::Sign);
5661 ShadowBase, PointerType::get(Shadow->
getType(), 0),
"_msarg_va_s");
5663 if (MS.TrackOrigins) {
5664 Value *Origin = MSV.getOrigin(
A);
5666 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
5670 Constant *OverflowSize = ConstantInt::get(
5671 IRB.
getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
5672 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5676 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
5680 ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
5681 PointerType::get(RegSaveAreaPtrTy, 0));
5682 Value *RegSaveAreaPtr = IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5683 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5685 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5686 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(), Alignment,
5691 unsigned RegSaveAreaSize =
5692 IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
5693 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5695 if (MS.TrackOrigins)
5696 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5697 Alignment, RegSaveAreaSize);
5703 Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C);
5707 ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
5708 PointerType::get(OverflowArgAreaPtrTy, 0));
5709 Value *OverflowArgAreaPtr =
5710 IRB.
CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
5711 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
5713 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
5714 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
5717 SystemZOverflowOffset);
5718 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
5720 if (MS.TrackOrigins) {
5722 SystemZOverflowOffset);
5723 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
5728 void finalizeInstrumentation()
override {
5729 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5730 "finalizeInstrumentation called twice");
5731 if (!VAStartInstrumentationList.
empty()) {
5738 IRB.
CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
5746 Intrinsic::umin, CopySize,
5750 if (MS.TrackOrigins) {
5760 for (
size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.
size();
5761 VaStartNo < VaStartNum; VaStartNo++) {
5762 CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
5763 NextNodeIRBuilder IRB(OrigInst);
5765 copyRegSaveArea(IRB, VAListTag);
5766 copyOverflowArea(IRB, VAListTag);
5773using VarArgLoongArch64Helper = VarArgMIPS64Helper;
5776struct VarArgNoOpHelper :
public VarArgHelper {
5777 VarArgNoOpHelper(
Function &
F, MemorySanitizer &MS,
5778 MemorySanitizerVisitor &MSV) {}
5786 void finalizeInstrumentation()
override {}
5792 MemorySanitizerVisitor &Visitor) {
5795 Triple TargetTriple(Func.getParent()->getTargetTriple());
5797 return new VarArgAMD64Helper(Func, Msan, Visitor);
5799 return new VarArgMIPS64Helper(Func, Msan, Visitor);
5801 return new VarArgAArch64Helper(Func, Msan, Visitor);
5804 return new VarArgPowerPC64Helper(Func, Msan, Visitor);
5806 return new VarArgSystemZHelper(Func, Msan, Visitor);
5808 return new VarArgLoongArch64Helper(Func, Msan, Visitor);
5810 return new VarArgNoOpHelper(Func, Msan, Visitor);
5817 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
5820 MemorySanitizerVisitor Visitor(
F, *
this, TLI);
5824 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
5827 return Visitor.runOnFunction();
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isStore(int Opcode)
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static const size_t kNumberOfAccessSizes
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
static bool isAMustTailRetVal(Value *RetVal)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static const MemoryMapParams Linux_LoongArch64_MemoryMapParams
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams
static const Align kMinOriginAlignment
static const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< uint64_t > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
static cl::opt< uint64_t > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
static const unsigned kOriginSize
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
static const Align kShadowTLSAlignment
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
static const MemoryMapParams Linux_S390X_MemoryMapParams
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_I386_MemoryMapParams
const char kMsanInitName[]
static cl::opt< bool > ClPrintStackNames("msan-print-stack-names", cl::desc("Print name of local stack variable"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< uint64_t > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleLifetimeIntrinsics("msan-handle-lifetime-intrinsics", cl::desc("when possible, poison scoped variables at the beginning of the scope " "(slower, but more precise)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
static GlobalVariable * createPrivateConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClEagerChecks("msan-eager-checks", cl::desc("check arguments and return values at function call boundaries"), cl::Hidden, cl::init(false))
static cl::opt< int > ClDisambiguateWarning("msan-disambiguate-warning-threshold", cl::desc("Define threshold for number of checks per " "debug location to force origin update."), cl::Hidden, cl::init(3))
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
static cl::opt< uint64_t > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams
static const unsigned kParamTLSSize
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
static const unsigned kRetvalTLSSize
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams
const char kMsanModuleCtorName[]
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDisableChecks("msan-disable-checks", cl::desc("Apply no_sanitize to the whole file"), cl::Hidden, cl::init(false))
Module.h This file contains the declarations for the Module class.
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
void setAlignment(Align Align)
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
This class represents a no-op cast from one type to another.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_SGT
signed greater than
@ ICMP_SGE
signed greater or equal
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static bool shouldExecute(unsigned CounterName)
This instruction compares its operands according to the predicate given to the constructor.
Class to represent fixed width SIMD vectors.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setComdat(Comdat *C)
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ ExternalLinkage
Externally visible function.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
std::vector< ConstraintInfo > ConstraintInfoVector
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field of array element value into an aggregate value.
Base class for instruction visitors.
void visit(Iterator Start, Iterator End)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const BasicBlock * getParent() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This class represents a cast from an integer to a pointer.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
This class wraps the llvm.memcpy intrinsic.
This class wraps the llvm.memmove intrinsic.
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
This class represents a cast from a pointer to an integer.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
bool remove(const value_type &X)
Remove an item from the set vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Triple - Helper class for working with autoconf configuration names.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static Type * getX86_MMXTy(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
'undef' values are things that do not have specified contents.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This represents the llvm.va_copy intrinsic.
This represents the llvm.va_start intrinsic.
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
This class represents zero extension of integer types.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
initializer< Ty > init(const Ty &Val)
Function * Kernel
Summary of a kernel (=entry point for target offloading).
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are are tuples (A,...
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Or
Bitwise or logical OR of integers.
std::pair< Instruction *, Value * > SplitBlockAndInsertSimpleForLoop(Value *End, Instruction *SplitBefore)
Insert a for (int i = 0; i < End; i++) loop structure (with the exception that End is assumed > 0,...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
constexpr unsigned BitWidth
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A CRTP mix-in to automatically provide informational APIs needed for passes.