#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"
214 "Controls which checks to insert");
232 "msan-track-origins",
237 cl::desc(
"keep going after reporting a UMR"),
246 "msan-poison-stack-with-call",
251 "msan-poison-stack-pattern",
252 cl::desc(
"poison uninitialized stack variables with the given pattern"),
257 cl::desc(
"Print name of local stack variable"),
266 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
271 cl::desc(
"exact handling of relational integer ICmp"),
275 "msan-handle-lifetime-intrinsics",
277 "when possible, poison scoped variables at the beginning of the scope "
278 "(slower, but more precise)"),
292 "msan-handle-asm-conservative",
303 "msan-check-access-address",
304 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
309 cl::desc(
"check arguments and return values at function call boundaries"),
313 "msan-dump-strict-instructions",
314 cl::desc(
"print out instructions with default strict semantics"),
318 "msan-instrumentation-with-call-threshold",
320 "If the function being instrumented requires more than "
321 "this number of checks and origin stores, use callbacks instead of "
322 "inline checks (-1 means never use callbacks)."),
327 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
337 cl::desc(
"Insert checks for constant shadow values"),
344 cl::desc(
"Place MSan constructors in comdat sections"),
350 cl::desc(
"Define custom MSan AndMask"),
354 cl::desc(
"Define custom MSan XorMask"),
358 cl::desc(
"Define custom MSan ShadowBase"),
362 cl::desc(
"Define custom MSan OriginBase"),
367 cl::desc(
"Define threshold for number of checks per "
368 "debug location to force origin update."),
struct MemoryMapParams {
  // Members reconstructed from their uses below (MS.MapParams->AndMask etc.).
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
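
// Illustrative values (an assumption, not taken from this excerpt): on
// Linux/x86_64 the defaults are commonly cited as AndMask = 0,
// XorMask = 0x500000000000, ShadowBase = 0, OriginBase = 0x100000000000,
// i.e. shadow lives at (addr ^ 0x500000000000) and origins at a fixed offset
// from shadow. The authoritative tables are the per-platform
// MemoryMapParams constants elided from this excerpt.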
class MemorySanitizer {

  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgSystemZHelper;

  void initializeModule(Module &M);

  template <typename... ArgsTy>

  Value *ParamOriginTLS;

  Value *RetvalOriginTLS;

  Value *VAArgOriginTLS;

  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  Value *MsanMetadataAlloca;

  const MemoryMapParams *MapParams;

  MemoryMapParams CustomMapParams;

  MDNode *OriginStoreWeights;
void insertModuleCtor(Module &M) {

      Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),

  MemorySanitizer Msan(*F.getParent(), Options);

      OS, MapClassName2PassName);

    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;
template <typename... ArgsTy>

        std::forward<ArgsTy>(Args)...);

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
  RetvalOriginTLS = nullptr;

  ParamOriginTLS = nullptr;

  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  MsanGetContextStateFn = M.getOrInsertFunction(

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] = getOrInsertMsanMetadataFunction(
    MsanMetadataPtrForStore_1_8[ind] = getOrInsertMsanMetadataFunction(
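    // The loop above registers the fixed-size KMSAN accessors
    // __msan_metadata_ptr_for_{load,store}_{1,2,4,8}; accesses of any other
    // size go through the *_n variants registered below.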
  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n",

  M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
                        IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
  return M.getOrInsertGlobal(Name, Ty, [&] {
                              nullptr, Name, nullptr,

  StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                    : "__msan_warning_with_origin_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName,
                                    IRB.getVoidTy(), IRB.getInt32Ty());

    StringRef WarningFnName =
        Recover ? "__msan_warning" : "__msan_warning_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

  VAArgOverflowSizeTLS =

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty());

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty());

  MsanSetAllocaOriginWithDescriptionFn = M.getOrInsertFunction(
      "__msan_set_alloca_origin_with_descr", IRB.getVoidTy(),
      IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  MsanSetAllocaOriginNoDescriptionFn = M.getOrInsertFunction(
      "__msan_set_alloca_origin_no_descr", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IntptrTy, IRB.getInt8PtrTy());
  MsanPoisonStackFn = M.getOrInsertFunction(
      "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);

  if (CallbacksInitialized)
    return;

  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin",
  MsanSetOriginFn = M.getOrInsertFunction(
      "__msan_set_origin",
      IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
  MemmoveFn =
      M.getOrInsertFunction("__msan_memmove", IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__msan_memcpy", IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset",
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn =
      M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),

  if (CompileKernel) {
    createKernelApi(M, TLI);
  } else {
    createUserspaceApi(M, TLI);
  }

  CallbacksInitialized = true;

      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;

  if (ShadowPassed || OriginPassed) {
    MapParams = &CustomMapParams;

  switch (TargetTriple.getOS()) {
    switch (TargetTriple.getArch()) {
      switch (TargetTriple.getArch()) {
      switch (TargetTriple.getArch()) {

  C = &(M.getContext());

  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  if (!CompileKernel) {
    M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(
          M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
          IRB.getInt32(TrackOrigins), "__msan_track_origins");
    });

    M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                GlobalValue::WeakODRLinkage,
                                IRB.getInt32(Recover), "__msan_keep_going");
    });
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void finalizeInstrumentation() = 0;

struct MemorySanitizerVisitor;

                                   MemorySanitizerVisitor &Visitor);

  if (TypeSizeFixed <= 8)
    return 0;

class NextNodeIRBuilder : public IRBuilder<> {
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  MemorySanitizer &MS;

  std::unique_ptr<VarArgHelper> VAHelper;

  bool PropagateShadow;

  struct ShadowOriginAndInsertPoint {
        : Shadow(S), Origin(O), OrigIns(I) {}

  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,

    bool SanitizeFunction =

    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);
    FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())

    if (MS.CompileKernel) {
      insertKmsanPrologue(IRB);

               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");

  bool instrumentWithCalls(Value *V) {
    if (isa<Constant>(V))
      return false;
    ++SplittableBlocksCount;

    return I.getParent() == FnPrologueEnd->getParent() &&
           (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));

    if (MS.TrackOrigins <= 1)
      return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);

    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        CurrentAlignment = IntptrAlignment;

    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
    if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
        paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,

    if (instrumentWithCalls(ConvertedShadow) &&
      Value *ConvertedShadow2 =
          Fn, {ConvertedShadow2,

      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,

  void materializeStores() {
      Value *Val = SI->getValueOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      const Align Alignment = SI->getAlign();
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
    if (MS.TrackOrigins < 2)
      return false;

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

    if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
      auto NewDebugLoc = OI->getDebugLoc();

      IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
      Origin = updateOrigin(Origin, IRBOrigin);

    if (MS.CompileKernel || MS.TrackOrigins)

    if (instrumentWithCalls(ConvertedShadow) &&
      Value *ConvertedShadow2 =
          Fn, {ConvertedShadow2,
               MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});

      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
          !MS.Recover, MS.ColdCallWeights);
      insertWarningFn(IRB, Origin);
  void materializeInstructionChecks(

    bool Combine = !MS.TrackOrigins;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      Value *ConvertedShadow = ShadowData.Shadow;
      if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
          insertWarningFn(IRB, ShadowData.Origin);

        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);

        Shadow = ConvertedShadow;

      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");

      materializeOneCheck(IRB, Shadow, nullptr);

  void materializeChecks() {
               [](const ShadowOriginAndInsertPoint &L,
                  const ShadowOriginAndInsertPoint &R) {
                 return L.OrigIns < R.OrigIns;

    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
        std::find_if(I + 1, InstrumentationList.end(),
                     [L = I->OrigIns](const ShadowOriginAndInsertPoint &R) {
                       return L != R.OrigIns;
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");

      MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
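    // The GEP indices 0..6 above imply this layout of the state returned by
    // __msan_get_context_state (a sketch; field types are elided in this
    // excerpt):
    //   { param_shadow, retval_shadow, va_arg_shadow, va_arg_origin,
    //     va_arg_overflow_size, param_origin, retval_origin }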
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));

    VAHelper->finalizeInstrumentation();

    if (InstrumentLifetimeStart) {
      for (auto Item : LifetimeStartList) {
        instrumentAlloca(*Item.second, Item.first);
        AllocaSet.remove(Item.second);

    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    materializeChecks();

    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getElementCount());
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      Value *ShadowBool = convertToBool(ShadowItem, IRB);

      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
      else
        Aggregator = ShadowBool;

    if (!Array->getNumElements())

    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);

      return collapseStructShadow(Struct, V, IRB);
    if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
      return collapseArrayShadow(Array, V, IRB);
    if (isa<VectorType>(V->getType())) {
      if (isa<ScalableVectorType>(V->getType()))
      unsigned BitWidth =
          V->getType()->getPrimitiveSizeInBits().getFixedValue();

    Type *VTy = V->getType();

    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    assert(IntPtrTy == MS.IntptrTy);
    return PointerType::get(*MS.C, 0);

    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return ConstantVector::getSplat(
          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    assert(IntPtrTy == MS.IntptrTy);

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));

    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
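    // Taken together with getShadowOriginPtrUserspace below, the userspace
    // mapping is (a sketch):
    //   Shadow = ((Addr & ~AndMask) ^ XorMask) + ShadowBase
    //   Origin = (((Addr & ~AndMask) ^ XorMask) + OriginBase) & ~Mask
    // where the final mask keeps origin slots 4-byte aligned.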
  std::pair<Value *, Value *>

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
      ShadowLong =
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
    Value *ShadowPtr = IRB.CreateIntToPtr(
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
        OriginLong =
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
      OriginPtr = IRB.CreateIntToPtr(
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));

    return std::make_pair(ShadowPtr, OriginPtr);

  template <typename... ArgsTy>

          {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);

    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,

    Value *ShadowOriginPtrs;

      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
      ShadowOriginPtrs = createMetadataCall(
          IRB,
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,

    return std::make_pair(ShadowPtr, OriginPtr);

  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,

      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
    Value *ShadowPtrs = ConstantInt::getNullValue(
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
    for (unsigned i = 0; i < NumElements; ++i) {
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
      if (MS.TrackOrigins)

    return {ShadowPtrs, OriginPtrs};

    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);

    if (!MS.TrackOrigins)

  Value *getOriginPtrForRetval() {
    return MS.RetvalOriginTLS;

    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
    if (!MS.TrackOrigins)
      return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;

    Type *ShadowTy = getShadowTy(OrigTy);

  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
                                      getPoisonedShadow(AT->getElementType()));

    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));

    Type *ShadowTy = getShadowTy(V);
      return getPoisonedShadow(ShadowTy);

    if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
      return getCleanShadow(V);

      Value *Shadow = ShadowMap[V];
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
      assert(Shadow && "No shadow for a value");

    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);

    if (Argument *A = dyn_cast<Argument>(V)) {
      Value *&ShadowPtr = ShadowMap[V];

        unsigned ArgOffset = 0;
        for (auto &FArg : F->args()) {
          if (!FArg.getType()->isSized()) {

          unsigned Size = FArg.hasByValAttr()
                              ? DL.getTypeAllocSize(FArg.getParamByValType())
                              : DL.getTypeAllocSize(FArg.getType());

          if (FArg.hasByValAttr()) {
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
            if (!PropagateShadow || Overflow) {
              EntryIRB.CreateMemSet(
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
              if (MS.TrackOrigins) {
                Value *OriginPtr =
                    getOriginPtrForArgument(EntryIRB, ArgOffset);
                EntryIRB.CreateMemCpy(

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
            if (MS.TrackOrigins) {
              Value *OriginPtr =
                  getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));

          LLVM_DEBUG(dbgs()
                     << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");

      assert(ShadowPtr && "Could not find shadow for an argument");

    return getCleanShadow(V);
    return getShadow(I->getOperand(i));

    if (!MS.TrackOrigins)
      return nullptr;
    if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
      return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");

    return getOrigin(I->getOperand(i));

      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");

    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
            isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
           "Can only insert checks for integer, vector, and aggregate shadow "
           "types");
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));

    Value *Shadow, *Origin;
      Shadow = getShadow(Val);
      Origin = getOrigin(Val);
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

                      ArrayRef(OrderingTable, NumOrderings));
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

                      ArrayRef(OrderingTable, NumOrderings));
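    // Summary of the two tables above (our reading): user-supplied C ABI
    // orderings are strengthened so the instrumentation's extra shadow
    // accesses stay ordered with the data access:
    //   stores: relaxed/release -> release; consume/acquire/acq_rel ->
    //           acq_rel; seq_cst -> seq_cst
    //   loads:  relaxed/consume/acquire -> acquire; release/acq_rel ->
    //           acq_rel; seq_cst -> seq_cst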
    if (I.getMetadata(LLVMContext::MD_nosanitize))
      return;
    if (isInPrologue(I))
      return;

    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertShadowCheck(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        setOrigin(&I, getCleanOrigin());

    StoreList.push_back(&I);
      insertShadowCheck(I.getPointerOperand(), &I);
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),

      insertShadowCheck(Addr, &I);

    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(Val, &I);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());

      insertShadowCheck(I.getOperand(1), &I);

    setOrigin(&I, getOrigin(&I, 0));

      insertShadowCheck(I.getOperand(2), &I);

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;

    setOrigin(&I, getOrigin(&I, 0));

                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));

                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);

    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);

    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);

    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);
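  // Rule used by the two bitwise handlers above (a sketch of the intent):
  // for AND, a result bit is defined if either operand supplies a defined 0,
  // hence S = (S1 & S2) | (V1 & S2) | (S1 & V2); OR is the mirror image,
  // with defined 1s masking the other operand's shadow.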
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;

    MemorySanitizerVisitor *MSV;

        : IRB(IRB), MSV(MSV) {}

      if (CombineShadow) {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");

      if (MSV->MS.TrackOrigins) {
        Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
        if (!ConstOrigin || !ConstOrigin->isNullValue()) {
          Value *Cond = MSV->convertToBool(OpShadow, IRB);

      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);

      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);

      if (MSV->MS.TrackOrigins) {
        MSV->setOrigin(I, Origin);

    if (!MS.TrackOrigins)
      return;

    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())
      OC.Add(Op.get());
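  // Illustrative use of the Combiner (mirrors setOriginForNaryOp above): all
  // operand shadows are OR-ed into a single shadow for the instruction, and
  // an operand's origin is taken only when its shadow may be nonzero, e.g.
  //   ShadowAndOriginCombiner SC(this, IRB);
  //   SC.Add(I.getOperand(0));
  //   SC.Add(I.getOperand(1));
  //   SC.Done(&I);  // finalizer name assumed from the class interface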
  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *

    Type *srcTy = V->getType();
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)
        cast<VectorType>(dstTy)->getElementCount() ==
            cast<VectorType>(srcTy)->getElementCount())

    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())

    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
      SC.Add(Op.get());
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
      Type *EltTy = VTy->getElementType();
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
          const APInt &V = Elt->getValue();

    if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
      const APInt &V = Elt->getValue();

    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));

    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
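  // Worked example for the constant-multiply rule above (our summary):
  // multiplying by a constant with trailing zeros shifts defined zeros into
  // the low bits, so the shadow is multiplied by the lowest set bit of the
  // constant, e.g. for
  //   %r = mul i32 %x, 12   ; lowest set bit is 4
  // the shadow becomes Sx << 2: the two low bits of %r are initialized even
  // if %x is fully poisoned.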
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

  void handleEqualityComparison(ICmpInst &I) {

    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    setOriginForNaryOp(I);
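  // Sketch of the equality rule implemented above: with C = A ^ B and
  // Sc = Sa | Sb, the outcome of A == B (or A != B) is defined iff
  //   (C & ~Sc) != 0   // some bit differs and is defined in both operands
  //   || Sc == 0       // or no bit is uninitialized at all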
  void handleRelationalComparisonExact(ICmpInst &I) {

    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();

        getLowestPossibleValue(IRB, A, Sa, IsSigned),
        getHighestPossibleValue(IRB, B, Sb, IsSigned));
        getHighestPossibleValue(IRB, A, Sa, IsSigned),
        getLowestPossibleValue(IRB, B, Sb, IsSigned));

    setOriginForNaryOp(I);
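  // Interval intuition for the exact relational handling above (our
  // reading): the poisoned bits of A and B define ranges of possible values;
  // the predicate is evaluated once for (lowest A, highest B) and once for
  // (highest A, lowest B), and the comparison result is defined only when
  // the two evaluations agree.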
  void handleSignedRelationalComparison(ICmpInst &I) {

    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
      op = I.getOperand(0);
      pre = I.getPredicate();
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(op));

    if (I.isEquality()) {
      handleEqualityComparison(I);

      handleRelationalComparisonExact(I);

      handleSignedRelationalComparison(I);

    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }

    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);

    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
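  // Shift rule used above (a summary): if any bit of the shift amount's
  // shadow S2 is set, the whole result is poisoned; otherwise the value
  // shadow S1 is shifted by the same, fully defined amount:
  //   S = (S1 shift V2) | sext(S2 != 0)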
    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S2 = getShadow(&I, 2);

        I.getModule(), I.getIntrinsicID(), S2Conv->getType());
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

        getShadow(I.getArgOperand(1));
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();

        getShadow(I.getArgOperand(1));
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();

        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
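  // The three bodies above lower llvm.memmove/memcpy/memset to the
  // __msan_memmove/__msan_memcpy/__msan_memset runtime callbacks registered
  // in initializeCallbacks, letting the runtime move or set shadow (and
  // origin) together with the application bytes; the original intrinsic is
  // then erased.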
  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }

    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(

      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins)

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
      else
        setOrigin(&I, getCleanOrigin());
    if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.arg_size();
    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();

    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));

    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)
      return false;

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      return handleVectorStoreIntrinsic(I);

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      return handleVectorLoadIntrinsic(I);

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

      InstrumentLifetimeStart = false;
    LifetimeStartList.push_back(std::make_pair(&I, AI));
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), Intrinsic::bswap, ArrayRef(&OpType, 1));
    setOrigin(&I, getOrigin(Op));

    Value *Src = I.getArgOperand(0);

    Constant *IsZeroPoison = cast<Constant>(I.getOperand(1));
      BoolShadow = IRB.CreateOr(BoolShadow, BoolZeroPoison, "_mscz_bs");

    Value *OutputShadow =
        IRB.CreateSExt(BoolShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                    bool HasRoundingMode = false) {

    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
            isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      break;

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
      for (int i = 1; i < NumUsedElements; ++i) {
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      AggShadow = ConvertShadow;

    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
            ResultShadow, ConstantInt::getNullValue(EltTy),
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), true);
    return CreateShadowCast(IRB, S2, T, true);

    return CreateShadowCast(IRB, S2, T, true);
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {

    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
        {IRB.CreateBitCast(S1, V1->getType()), V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

  Type *getMMXVectorTy(unsigned EltSizeInBits) {
    const unsigned X86_MMXSizeInBits = 64;
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
    return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                                X86_MMXSizeInBits / EltSizeInBits);
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;
  void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();

    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);

    Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();

        F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));

        IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");

    setOriginForNaryOp(I);

    const unsigned SignificantBitsPerResultElement = 16;
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    unsigned ZeroBitsPerResultElement =

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);

    setOriginForNaryOp(I);

                                  unsigned EltSizeInBits = 0) {
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);

    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    setOriginForNaryOp(I);

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);

    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));

    setOriginForNaryOp(I);
    setOrigin(&I, getOrigin(&I, 0));

    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);

    setOrigin(&I, getOrigin(&I, 0));

    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);

    setOrigin(&I, getOrigin(&I, 0));

        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;

      insertShadowCheck(Addr, &I);

    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);

      insertShadowCheck(Addr, &I);

    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
                                    : getCleanOrigin();
    insertShadowCheck(Shadow, Origin, &I);
    Value *PassThru = I.getArgOperand(2);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, false);

    Value *Shadow = IRB.CreateMaskedExpandLoad(
        ShadowTy, ShadowPtr, Mask, getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);

    setOrigin(&I, getCleanOrigin());

    Value *Values = I.getArgOperand(0);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, true);
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    Value *PassThru = I.getArgOperand(3);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertShadowCheck(Mask, &I);
      insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);

    Value *Shadow =
        IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
                               getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);

    setOrigin(&I, getCleanOrigin());

    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertShadowCheck(Mask, &I);
      insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, true);
    Value *V = I.getArgOperand(0);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());

    Value *Shadow = getShadow(V);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, true);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getParent()->getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,

    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    Value *PassThru = I.getArgOperand(3);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, false);

    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                       getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
      return;

    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");

    setOrigin(&I, Origin);
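  // Masked loads mirror the data flow of the original intrinsic (a summary):
  // active lanes read their shadow from shadow memory, inactive lanes keep
  // the pass-through operand's shadow, and with origin tracking the
  // pass-through origin is used only where that shadow is actually nonzero.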
    Type *ShadowTy = getShadowTy(&I);

    Value *SMask = getShadow(&I, 1);

        {getShadow(&I, 0), I.getOperand(1)});

    setOriginForNaryOp(I);

    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {

        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
        getPclmulMask(Width, Imm & 0x01));
        getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();

    Value *Second = getShadow(&I, 1);

    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);

    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);

        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();

    Value *Second = getShadow(&I, 1);

    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(I.getArgOperand(0)->getType() == I.getType());

    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
    switch (I.getIntrinsicID()) {
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountZeroes(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
      handleVectorReduceIntrinsic(I);
      break;
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;
    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;

    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, true);
      break;

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
      handleVectorPackIntrinsic(I);
      break;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;

    case Intrinsic::x86_mmx_psad_bw:
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I);
      break;

    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 8);
      break;

    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 16);
      break;

    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;

    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;

    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;

    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;

    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;

    case Intrinsic::is_constant:
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  void visitLibAtomicLoad(CallBase &CB) {
    assert(isa<CallInst>(CB));

    Value *NewOrdering =

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});

  void visitLibAtomicStore(CallBase &CB) {

    Value *NewOrdering =

    Value *DstShadowPtr =

      visitAsmInstruction(CB);
      visitInstruction(CB);

    case LibFunc_atomic_load:
      if (!isa<CallInst>(CB)) {
        llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
      visitLibAtomicLoad(CB);
    case LibFunc_atomic_store:
      visitLibAtomicStore(CB);
4140 if (
auto *Call = dyn_cast<CallInst>(&CB)) {
4141 assert(!isa<IntrinsicInst>(Call) &&
"intrinsics are handled elsewhere");
4149 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
4151 Call->removeFnAttrs(
B);
4153 Func->removeFnAttrs(
B);
4159 bool MayCheckCall = MS.EagerChecks;
4163 MayCheckCall &= !
Func->getName().startswith(
"__sanitizer_unaligned_");
    unsigned ArgOffset = 0;
    LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      unsigned Size = 0;
      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertShadowCheck(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        Value *Store = nullptr;
        // Compute the Shadow for arg even if it is ByVal, because if ByVal is
        // removed we will restore the unmodified shadow value.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a single
          // load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment, Size);
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of uninit
          // data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        (void)Store;
        assert(Store != nullptr);
      }
      assert(Size != 0);
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }
    LLVM_DEBUG(dbgs() << "  done with call args\n");
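
    // Note the asymmetry above: an eagerly checked argument is reported at
    // the call site and produces no TLS traffic, while every other argument
    // pays for a shadow (and possibly origin) store that the callee reads
    // back on entry.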
    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!CB.getType()->isSized())
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
        kShadowTLSAlignment, "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
                                         getOriginPtrForRetval()));
  }
  static bool isAMustTailRetVal(Value *RetVal) {
    // Tail call may have a bitcast between return.
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
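
  // Shadow of a musttail call's return value must be left untouched: the
  // epilogue belongs to the callee, so both the call-site handling above and
  // visitReturnInst below bail out early in that case.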
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertShadowCheck(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn,
                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
                        IRB.CreatePointerCast(Idptr, IRB.getInt8PtrTy()),
                        IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn,
                       {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
                        IRB.CreatePointerCast(Idptr, IRB.getInt8PtrTy())});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn,
                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
    }
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getParent()->getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the stack level.
    AllocaSet.insert(&I);
  }
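
  // Select is one of the few instructions with a sharper rule than "OR the
  // operand shadows": when the condition is well-defined only the chosen
  // operand's shadow matters, and when the condition is poisoned, bits that
  // agree and are clean in both arms are still well-defined. Worked example
  // (values assumed): select(b, 7, 7) with poisoned b is fully defined;
  // select(b, 7, 8) with poisoned b is poisoned exactly in the bits where 7
  // and 8 differ (7 ^ 8) plus any bits poisoned in either arm.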
  void visitSelectInst(SelectInst &I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();
    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    // Result shadow if condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned.
      // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
    }
    setShadow(&I, IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"));
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(
                        Sb, getOrigin(I.getCondition()),
                        IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
                                         getOrigin(I.getFalseValue()))));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See https://github.com/google/sanitizers/issues/504
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    LLVM_DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    LLVM_DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }
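
  // Illustration (hypothetical IR): for
  //   %res = insertvalue {i32, i64} %agg, i64 %v, 1
  // the result shadow is the shadow of %agg with its second member replaced
  // by the shadow of %v, mirroring the instruction itself.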
  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // Check the operand value itself.
    insertShadowCheck(Operand, &I);
    if (!Operand->getType()->isPointerTy() || !isOutput)
      return;
    if (!ElemTy || !ElemTy->isSized())
      return;
    // Conservatively mark the pointed-to memory of an output operand as
    // initialized via the runtime.
    Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
    Value *SizeVal = ConstantInt::get(MS.IntptrTy, DL.getTypeStoreSize(ElemTy));
    IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
  }
  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }
  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments.
    const DataLayout &DL = F.getParent()->getDataLayout();
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Doing so before unpoisoning output arguments, so
    // that we won't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, /*ElemTy=*/nullptr, I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual InlineAsm
    // call, so that the shadow for memory published in the asm() statement
    // remains valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      Type *ElemTy = CB->getParamElementType(i);
      instrumentAsmArgument(Operand, ElemTy, I, IRB, DL, /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertShadowCheck(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
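
/// AMD64-specific implementation of VarArgHelper. Under the System V AMD64
/// calling convention, va_arg consumes a 176-byte register save area (six
/// 8-byte general-purpose registers = 48 bytes, then eight 16-byte SSE
/// registers = 128 bytes) before falling back to the overflow area on the
/// stack; the offsets below mirror that layout.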
struct VarArgAMD64Helper : public VarArgHelper {
  // AMD64 ABI Draft 0.99.6 p3.5.7: the register save area ends after the
  // general-purpose registers at byte 48 and after the SSE registers at 176.
  static const unsigned AMD64GpEndOffset = 48;
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
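
  // For example (by the rules above): i32 and pointers classify as
  // AK_GeneralPurpose, double and <4 x float> as AK_FloatingPoint, and
  // anything else, e.g. an i128 or a large by-value struct, as AK_Memory.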
  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout. We do this because Clang lowers
  // va_arg in the frontend, and this pass only sees the low level code that
  // deals with va_list internals.
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        // Fixed arguments passed through the overflow area will be stepped
        // over by va_start, so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Value *ShadowBase = getShadowPtrForVAArgument(
            RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
        OverflowOffset += alignTo(ArgSize, 8);
        if (!ShadowBase)
          continue;
        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) =
            MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                   kShadowTLSAlignment, /*isStore*/ false);

        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase =
              getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase =
              getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          ShadowBase =
              getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
          if (MS.TrackOrigins)
            OriginBase =
                getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
          OverflowOffset += alignTo(ArgSize, 8);
        }
        // Take fixed arguments into account for GpOffset and FpOffset,
        // but don't actually store shadows for them.
        if (IsFixed)
          continue;
        if (!ShadowBase)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kMinOriginAlignment, kShadowTLSAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   unsigned ArgOffset, unsigned ArgSize) {
    // Make sure we don't overflow kParamTLSSize.
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg_va_s");
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
    // getOriginPtrForVAArgument() is always called after
    // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
    // overflow.
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_va_o");
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    const Align Alignment = Align(8);
    std::tie(ShadowPtr, OriginPtr) =
        MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);

    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, Alignment, false);
    // We shouldn't need to zero out the origins, as they're only checked for
    // nonzero shadow.
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
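
  // Everything above only stages va_arg shadow into the __msan_va_arg_tls
  // buffer at call sites. finalizeInstrumentation below makes a backup copy
  // of that buffer in the prologue and, after each va_start, copies it over
  // the shadow of the actual register save area and overflow area once their
  // addresses can be read out of the va_list.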
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          PointerType::get(RegSaveAreaPtrTy, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          PointerType::get(OverflowArgAreaPtrTy, 0));
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr,
                         Alignment, VAArgOverflowSize);
      }
    }
  }
};