#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"
218 "Controls which checks to insert");
221 "Controls which instruction to instrument");
239 "msan-track-origins",
244 cl::desc(
"keep going after reporting a UMR"),
253 "msan-poison-stack-with-call",
258 "msan-poison-stack-pattern",
259 cl::desc(
"poison uninitialized stack variables with the given pattern"),
264 cl::desc(
"Print name of local stack variable"),
273 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
278 cl::desc(
"exact handling of relational integer ICmp"),
282 "msan-handle-lifetime-intrinsics",
284 "when possible, poison scoped variables at the beginning of the scope "
285 "(slower, but more precise)"),
296 "msan-handle-asm-conservative",
307 "msan-check-access-address",
308 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
313 cl::desc(
"check arguments and return values at function call boundaries"),
317 "msan-dump-strict-instructions",
318 cl::desc(
"print out instructions with default strict semantics"),
322 "msan-dump-strict-intrinsics",
323 cl::desc(
"Prints 'unknown' intrinsics that were handled heuristically. "
324 "Use -msan-dump-strict-instructions to print intrinsics that "
325 "could not be handled exactly nor heuristically."),
329 "msan-instrumentation-with-call-threshold",
331 "If the function being instrumented requires more than "
332 "this number of checks and origin stores, use callbacks instead of "
333 "inline checks (-1 means never use callbacks)."),
338 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
348 cl::desc(
"Insert checks for constant shadow values"),
355 cl::desc(
"Place MSan constructors in comdat sections"),
361 cl::desc(
"Define custom MSan AndMask"),
365 cl::desc(
"Define custom MSan XorMask"),
369 cl::desc(
"Define custom MSan ShadowBase"),
373 cl::desc(
"Define custom MSan OriginBase"),
378 cl::desc(
"Define threshold for number of checks per "
379 "debug location to force origin update."),
struct MemoryMapParams {

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
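// MemoryMapParams (body elided above) carries the per-platform mapping
// constants consumed through MS.MapParams below: AndMask, XorMask, ShadowBase
// and OriginBase. PlatformMemoryMapParams merely pairs the 32-bit and 64-bit
// parameter sets for one target OS.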
class MemorySanitizer {
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPCHelper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;

  void initializeModule(Module &M);

  template <typename... ArgsTy>

  Value *ParamOriginTLS;
  Value *RetvalOriginTLS;
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  Value *MsanMetadataAlloca;

  const MemoryMapParams *MapParams;
  MemoryMapParams CustomMapParams;

  MDNode *OriginStoreWeights;
void insertModuleCtor(Module &M) {

      Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),

  MemorySanitizer Msan(*F.getParent(), Options);

      OS, MapClassName2PassName);

    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;

template <typename... ArgsTy>
        std::forward<ArgsTy>(Args)...);
  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
  RetvalOriginTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
                                    IRB.getInt32Ty());

  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
  }

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IRB.getInt64Ty());
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IRB.getInt64Ty());

  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
  return M.getOrInsertGlobal(Name, Ty, [&] {
                              nullptr, Name, nullptr,
void MemorySanitizer::createUserspaceApi(Module &M,

  StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                    : "__msan_warning_with_origin_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(),
                                    IRB.getInt32Ty());

      Recover ? "__msan_warning" : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

  VAArgOverflowSizeTLS =

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty());

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
void MemorySanitizer::initializeCallbacks(Module &M,

  if (CallbacksInitialized)

  MsanChainOriginFn = M.getOrInsertFunction("__msan_chain_origin",

  MsanSetOriginFn = M.getOrInsertFunction(
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());

      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset", PtrTy, PtrTy,
                                   IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

    createKernelApi(M, TLI);
    createUserspaceApi(M, TLI);

  CallbacksInitialized = true;

      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  if (ShadowPassed || OriginPassed) {
    MapParams = &CustomMapParams;

  switch (TargetTriple.getOS()) {
    switch (TargetTriple.getArch()) {
    switch (TargetTriple.getArch()) {
    switch (TargetTriple.getArch()) {

  C = &(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();

  if (!CompileKernel) {
    M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(
          M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
          IRB.getInt32(TrackOrigins), "__msan_track_origins");

    M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                GlobalValue::WeakODRLinkage,
                                IRB.getInt32(Recover), "__msan_keep_going");
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void finalizeInstrumentation() = 0;

struct MemorySanitizerVisitor;

                                 MemorySanitizerVisitor &Visitor);

  if (TypeSizeFixed <= 8)

class NextNodeIRBuilder : public IRBuilder<> {

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  MemorySanitizer &MS;
  std::unique_ptr<VarArgHelper> VAHelper;
  bool PropagateShadow;

  struct ShadowOriginAndInsertPoint {
        : Shadow(S), Origin(O), OrigIns(I) {}

  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
    bool SanitizeFunction =
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);
    FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())

    if (MS.CompileKernel) {
      insertKmsanPrologue(IRB);

               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");

  bool instrumentWithCalls(Value *V) {
    if (isa<Constant>(V))
    ++SplittableBlocksCount;

    return I.getParent() == FnPrologueEnd->getParent() &&
           (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));

    if (MS.TrackOrigins <= 1)
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
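  // updateOrigin() is a no-op unless full origin chaining is enabled
  // (-msan-track-origins=2); only then does each propagation step call
  // __msan_chain_origin to prepend a new entry to the origin history.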
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        CurrentAlignment = IntptrAlignment;

    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
    if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
        paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,

    if (instrumentWithCalls(ConvertedShadow) &&
      Value *ConvertedShadow2 =
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,

  void materializeStores() {
      Value *Val = SI->getValueOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      const Align Alignment = SI->getAlign();
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
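  // materializeStores() runs once per function: for every recorded StoreInst
  // it writes the shadow of the stored value to the matching shadow address,
  // plus the origin when tracking is on. Atomic stores receive a clean shadow
  // (SI->isAtomic() above) because data and shadow cannot be updated in one
  // atomic step, so atomically written data is conservatively treated as
  // initialized.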
    if (MS.TrackOrigins < 2)

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

    if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
      auto NewDebugLoc = OI->getDebugLoc();
        IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
        Origin = updateOrigin(Origin, IRBOrigin);

    if (MS.CompileKernel || MS.TrackOrigins)

    if (instrumentWithCalls(ConvertedShadow) &&
      ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
      Value *ConvertedShadow2 =
          Fn, {ConvertedShadow2,
               MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
          !MS.Recover, MS.ColdCallWeights);
      insertWarningFn(IRB, Origin);

  void materializeInstructionChecks(
    bool Combine = !MS.TrackOrigins;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      Value *ConvertedShadow = ShadowData.Shadow;
      if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
          insertWarningFn(IRB, ShadowData.Origin);
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        Shadow = ConvertedShadow;

      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");

      materializeOneCheck(IRB, Shadow, nullptr);
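  // With origin tracking off, Combine is true and all shadow checks attached
  // to one instruction are OR-ed ("_msor") into a single boolean feeding one
  // warning; with origins on, every check is materialized separately so each
  // report carries the matching origin.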
  void materializeChecks() {
    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;

    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");

    MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
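    // KMSAN has no TLS; __msan_get_context_state() returns a per-task state
    // struct instead, and the GEPs above pick out its fields in order:
    // param_shadow (0), retval_shadow (1), va_arg_shadow (2),
    // va_arg_origin (3), va_arg_overflow_size (4), param_origin (5),
    // retval_origin (6).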
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));

    VAHelper->finalizeInstrumentation();

    if (InstrumentLifetimeStart) {
      for (auto Item : LifetimeStartList) {
        instrumentAlloca(*Item.second, Item.first);
        AllocaSet.remove(Item.second);

      instrumentAlloca(*AI);

    materializeChecks();

    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
                             VT->getElementCount());
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");

      Value *ShadowBool = convertToBool(ShadowItem, IRB);
      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
        Aggregator = ShadowBool;

    if (!Array->getNumElements())
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);

      return collapseStructShadow(Struct, V, IRB);
    if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
      return collapseArrayShadow(Array, V, IRB);
    if (isa<VectorType>(V->getType())) {
      if (isa<ScalableVectorType>(V->getType()))
          V->getType()->getPrimitiveSizeInBits().getFixedValue();

    Type *VTy = V->getType();
    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    assert(IntPtrTy == MS.IntptrTy);

    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C);

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));

  std::pair<Value *, Value *>
      assert(VectTy->getElementType()->isPointerTy());
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
        OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    return std::make_pair(ShadowPtr, OriginPtr);
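  // Userspace shadow and origin addresses are pure arithmetic on the
  // application address, assembled from the steps above (a sketch):
  //
  //   Offset = (Addr & ~AndMask) ^ XorMask
  //   Shadow = Offset + ShadowBase
  //   Origin = (Offset + OriginBase) & ~Mask   // aligned down for u32 slots
  //
  // so the hot path needs no runtime call, unlike the KMSAN variant below.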
  template <typename... ArgsTy>
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});

  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
    Value *ShadowOriginPtrs;
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
    return std::make_pair(ShadowPtr, OriginPtr);

  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
    Value *ShadowPtrs = ConstantInt::getNullValue(
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
    for (unsigned i = 0; i < NumElements; ++i) {
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    return {ShadowPtrs, OriginPtrs};

    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);

    if (!MS.TrackOrigins)

  Value *getOriginPtrForRetval() {
    return MS.RetvalOriginTLS;

    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);

    if (!MS.TrackOrigins)
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;

    Type *ShadowTy = getShadowTy(OrigTy);

  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
                            getPoisonedShadow(AT->getElementType()));
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));

    Type *ShadowTy = getShadowTy(V);
    return getPoisonedShadow(ShadowTy);
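  // A "clean" shadow is the all-zero constant of the shadow type (fully
  // initialized); a "poisoned" shadow is all ones, assembled element by
  // element for arrays and structs by the getPoisonedShadow() overloads.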
    if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
      return getCleanShadow(V);

    Value *Shadow = ShadowMap[V];
      LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
    assert(Shadow && "No shadow for a value");

    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);

    if (Argument *A = dyn_cast<Argument>(V)) {
      Value *&ShadowPtr = ShadowMap[V];
        unsigned ArgOffset = 0;
        for (auto &FArg : F->args()) {
          if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
                           ? "vscale not fully supported\n"
                           : "Arg is not sized\n"));
              ShadowPtr = getCleanShadow(V);
              setOrigin(A, getCleanOrigin());

          unsigned Size = FArg.hasByValAttr()
                              ? DL.getTypeAllocSize(FArg.getParamByValType())
                              : DL.getTypeAllocSize(FArg.getType());

          if (FArg.hasByValAttr()) {
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
            if (!PropagateShadow || Overflow) {
              EntryIRB.CreateMemSet(
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              EntryIRB.CreateMemCpy(

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
                     << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");

      assert(ShadowPtr && "Could not find shadow for an argument");

    return getCleanShadow(V);
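  // Arguments obtain their shadow lazily, on the first getShadow() query: it
  // is loaded in the entry block from the parameter TLS array at ArgOffset
  // (getShadowPtrForArgument), while byval arguments have their shadow copied
  // into the pointee's shadow memory instead. Arguments known to be defined
  // (noundef under eager checks, or anything once the TLS area overflows)
  // simply get a clean shadow.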
    return getShadow(I->getOperand(i));

    if (!MS.TrackOrigins)
    if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
      return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");

    return getOrigin(I->getOperand(i));

      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");

    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
            isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
           "Can only insert checks for integer, vector, and aggregate shadow "
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));

    Value *Shadow, *Origin;
      Shadow = getShadow(Val);
      Origin = getOrigin(Val);
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
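  // Orderings are only ever strengthened, never weakened: atomic stores
  // become at least release so the shadow written just before the store is
  // published with it, and atomic loads become at least acquire so the shadow
  // read just after the load is current. The OrderingTable variants apply the
  // same conversion to the C ABI orderings used by __atomic_* libcalls.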
    if (I.getMetadata(LLVMContext::MD_nosanitize))
    if (isInPrologue(I))

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());

    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertShadowCheck(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        setOrigin(&I, getCleanOrigin());

    StoreList.push_back(&I);
      insertShadowCheck(I.getPointerOperand(), &I);

    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),

      insertShadowCheck(Addr, &I);

    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(Val, &I);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());

    insertShadowCheck(I.getOperand(1), &I);
    setOrigin(&I, getOrigin(&I, 0));

    insertShadowCheck(I.getOperand(2), &I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
    setOrigin(&I, getOrigin(&I, 0));

                             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));

                             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }

    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);

    Value *S2 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);
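  // Bitwise AND/OR use the classic MSan approximation: a result bit is
  // defined whenever a defined bit of either operand forces it. For AND
  // (OR is the dual case):
  //
  //   S = (S1 & S2) | (V1 & S2) | (S1 & V2)
  //
  // which is exactly the three-way CreateOr({S1S2, V1S2, S1V2}) above.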
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    MemorySanitizerVisitor *MSV;

        : IRB(IRB), MSV(MSV) {}

      if (CombineShadow) {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");

      if (MSV->MS.TrackOrigins) {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *Cond = MSV->convertToBool(OpShadow, IRB);

      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);

      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      if (MSV->MS.TrackOrigins) {
        MSV->setOrigin(I, Origin);

      if (MSV->MS.TrackOrigins) {

    if (!MS.TrackOrigins)
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *

    Type *srcTy = V->getType();
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)
        cast<VectorType>(dstTy)->getElementCount() ==
            cast<VectorType>(srcTy)->getElementCount())

    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
    if (V->getType()->isPtrOrPtrVectorTy())

    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
      Type *EltTy = VTy->getElementType();
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
          const APInt &V = Elt->getValue();
          Elements.push_back(ConstantInt::get(EltTy, V2));
          Elements.push_back(ConstantInt::get(EltTy, 1));
    if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
      const APInt &V = Elt->getValue();
      ShadowMul = ConstantInt::get(Ty, V2);
      ShadowMul = ConstantInt::get(Ty, 1);

        IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
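  // Multiplying by a constant with N trailing zero bits shifts all operand
  // bits, defined or not, left by N, so the operand's shadow is multiplied by
  // a matching constant (the elided computation of V2 above; in upstream MSan
  // it is derived from the multiplier's trailing-zero count). Elements that
  // are not ConstantInt fall back to a conservative multiplier of 1, and the
  // result's origin is simply that of the non-constant operand.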
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));

    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

  void handleEqualityComparison(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    setOriginForNaryOp(I);

  void handleRelationalComparisonExact(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();
    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      return std::make_pair(Min, Max);

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
    setOriginForNaryOp(I);
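  // Exact relational compares build, per operand, the least and greatest
  // unsigned values it could take given its shadow: poisoned bits are free,
  // defined bits are fixed, and signed operands are first rotated into
  // unsigned space by the CreateXor with MinVal. The comparison result is
  // defined only where comparing [Amin, Amax] against [Bmin, Bmax] gives the
  // same answer for every assignment of the poisoned bits.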
  void handleSignedRelationalComparison(ICmpInst &I) {
    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
      op = I.getOperand(0);
      pre = I.getPredicate();
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(op));

    if (I.isEquality()) {
      handleEqualityComparison(I);
      handleRelationalComparisonExact(I);
      handleSignedRelationalComparison(I);

    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }

    Value *S2 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

    Value *S0 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

        getShadow(I.getArgOperand(1));
        {I.getArgOperand(0), I.getArgOperand(1),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();

        getShadow(I.getArgOperand(1));
        {I.getArgOperand(0), I.getArgOperand(1),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();

        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
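  // memmove/memcpy/memset never reach the default lowering: the calls are
  // erased (I.eraseFromParent() above) and replaced by the __msan_memmove,
  // __msan_memcpy and __msan_memset runtime functions registered in
  // initializeCallbacks(), which move shadow (and origins) along with the
  // data.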
  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }

    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
      insertShadowCheck(Addr, &I);
    if (MS.TrackOrigins)

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));
      insertShadowCheck(Addr, &I);
    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
        setOrigin(&I, getCleanOrigin());

    if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy()))
    unsigned NumArgOperands = I.arg_size();
    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));

    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      return handleVectorStoreIntrinsic(I);

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      return handleVectorLoadIntrinsic(I);

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I))

    if (handleUnknownIntrinsicUnlogged(I)) {
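  // Heuristic for intrinsics with no dedicated handler: a void,
  // memory-writing intrinsic taking (pointer, vector) is treated as a plain
  // vector store; a memory-reading one returning a vector from a single
  // pointer as a plain vector load; a nomem intrinsic by OR-ing its operand
  // shadows. Anything else falls through to the strict default treatment.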
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

    InstrumentLifetimeStart = false;
    LifetimeStartList.push_back(std::make_pair(&I, AI));

    Type *OpType = Op->getType();
    setOrigin(&I, getOrigin(Op));

    Value *Src = I.getArgOperand(0);
    Constant *IsZeroPoison = cast<Constant>(I.getOperand(1));
      BoolShadow = IRB.CreateOr(BoolShadow, BoolZeroPoison, "_mscz_bs");
    Value *OutputShadow =
        IRB.CreateSExt(BoolShadow, getShadowTy(Src), "_mscz_os");
    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);

  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                    bool HasRoundingMode = false) {
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
            isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      ConvertOp = I.getArgOperand(0);

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      AggShadow = ConvertShadow;
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
            ResultShadow, ConstantInt::getNullValue(EltTy),
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
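  // Conversion intrinsics such as cvtsd2si follow one scheme: OR together and
  // check the shadow of the NumUsedElements lanes that are actually converted
  // (AggShadow), then return a shadow whose converted lanes are clean while
  // the remaining lanes keep CopyOp's shadow unchanged.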
    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), true);
    return CreateShadowCast(IRB, S2, T, true);

    return CreateShadowCast(IRB, S2, T, true);

  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
                   {IRB.CreateBitCast(S1, V1->getType()), V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

  Type *getMMXVectorTy(unsigned EltSizeInBits) {
    const unsigned X86_MMXSizeInBits = 64;
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
                                X86_MMXSizeInBits / EltSizeInBits);

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

                                 unsigned MMXEltSizeInBits = 0) {
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
    if (MMXEltSizeInBits) {
                                {}, {S1_ext, S2_ext}, nullptr,
                                "_msprop_vector_pack");
    if (MMXEltSizeInBits)
    setOriginForNaryOp(I);
  Constant *createDppMask(unsigned Width, unsigned Mask) {

    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();
    Value *DstMaskV = createDppMask(Width, DstMask);

    Value *S0 = getShadow(&I, 0);
    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();
    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned Mask = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
        SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    setOriginForNaryOp(I);

    C = CreateAppToShadowCast(IRB, C);

    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;

    C = convertBlendvToSelectMask(IRB, C);
    Sc = convertBlendvToSelectMask(IRB, Sc);

    handleSelectLikeInst(I, C, T, F);

  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    unsigned ZeroBitsPerResultElement =

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    setOriginForNaryOp(I);

                                  unsigned MMXEltSizeInBits = 0) {
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits * 2) : I.getType();
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setOriginForNaryOp(I);
    setOrigin(&I, getOrigin(&I, 0));

    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));

    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));

        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;
      insertShadowCheck(Addr, &I);

    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);
      insertShadowCheck(Addr, &I);
    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
    insertShadowCheck(Shadow, Origin, &I);
    Value *PassThru = I.getArgOperand(2);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, false);
                                getShadow(PassThru), "_msmaskedexpload");
    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());

    Value *Values = I.getArgOperand(0);
      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, true);

    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    Value *PassThru = I.getArgOperand(3);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertShadowCheck(Mask, &I);
      insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);
                                       getShadow(PassThru), "_msmaskedgather");
    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());

    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertShadowCheck(Mask, &I);
      insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, true);

    Value *V = I.getArgOperand(0);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
    Value *Shadow = getShadow(V);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, true);

    if (!MS.TrackOrigins)
    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,

    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    Value *PassThru = I.getArgOperand(3);

      insertShadowCheck(Ptr, &I);
      insertShadowCheck(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, false);
                                       getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
    setOrigin(&I, Origin);
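  // The masked-memory handlers above share one pattern: optionally check the
  // pointer and mask shadows (when address checking is enabled), then mirror
  // the operation itself on shadow memory, e.g. a masked load of shadow with
  // the pass-through value's shadow filling the disabled lanes
  // ("_msmaskedld"), or a masked store/scatter of the value's shadow through
  // the same mask.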
    Type *ShadowTy = getShadowTy(&I);
    Value *SMask = getShadow(&I, 1);
                   {getShadow(&I, 0), I.getOperand(1)});
    setOriginForNaryOp(I);

    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {

        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
                                getPclmulMask(Width, Imm & 0x01));
                                getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));

        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *Second = getShadow(&I, 1);
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);

    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);

        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *Second = getShadow(&I, 1);
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);

    assert(I.getArgOperand(0)->getType() == I.getType());
    assert(isa<ConstantInt>(I.getArgOperand(1)));

    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));

    assert(I.getType()->isIntOrIntVectorTy());
    assert(I.getArgOperand(0)->getType() == I.getType());
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));

    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    int numArgOperands = I.arg_size();
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    int skipTrailingOperands = 1;

      insertShadowCheck(Addr, &I);

      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));

    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);

        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements() *
            (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

        I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), true);
    ShadowArgs.append(1, OutputShadowPtr);

    if (MS.TrackOrigins) {
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
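  // NEON st2/st3/st4 (and, when useLane is set, the *_lane forms, which also
  // skip the trailing lane operand) are handled by calling the same store
  // intrinsic a second time on the per-register shadow vectors collected in
  // ShadowArgs, writing them, interleaved exactly like the data, to the
  // address's shadow memory (OutputShadowPtr).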
                                         unsigned int trailingVerbatimArgs) {
    assert(trailingVerbatimArgs < I.arg_size());

    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
      Value *Arg = I.getArgOperand(i);

    Value *CombinedShadow = CI;

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");

    setOriginForNaryOp(I);
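  // handleIntrinsicByApplyingToShadow re-runs an intrinsic on shadows: the
  // leading operands are replaced by their shadow values, the final
  // trailingVerbatimArgs operands (lane indices, immediates, ...) are passed
  // through unchanged, and the shadows of those verbatim operands are OR-ed
  // into the combined result so their uninitializedness still propagates.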
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::is_fpclass:

    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:

    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountZeroes(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
      handleVectorReduceIntrinsic(I);
      break;
    case Intrinsic::x86_sse_stmxcsr:

    case Intrinsic::x86_sse_ldmxcsr:

    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, true);
      break;
4268 case Intrinsic::x86_sse2_packsswb_128:
4269 case Intrinsic::x86_sse2_packssdw_128:
4270 case Intrinsic::x86_sse2_packuswb_128:
4271 case Intrinsic::x86_sse41_packusdw:
4272 case Intrinsic::x86_avx2_packsswb:
4273 case Intrinsic::x86_avx2_packssdw:
4274 case Intrinsic::x86_avx2_packuswb:
4275 case Intrinsic::x86_avx2_packusdw:
4276 handleVectorPackIntrinsic(
I);
4279 case Intrinsic::x86_sse41_pblendvb:
4280 case Intrinsic::x86_sse41_blendvpd:
4281 case Intrinsic::x86_sse41_blendvps:
4282 case Intrinsic::x86_avx_blendv_pd_256:
4283 case Intrinsic::x86_avx_blendv_ps_256:
4284 case Intrinsic::x86_avx2_pblendvb:
4285 handleBlendvIntrinsic(
I);
4288 case Intrinsic::x86_avx_dp_ps_256:
4289 case Intrinsic::x86_sse41_dppd:
4290 case Intrinsic::x86_sse41_dpps:
4291 handleDppIntrinsic(
I);
4294 case Intrinsic::x86_mmx_packsswb:
4295 case Intrinsic::x86_mmx_packuswb:
4296 handleVectorPackIntrinsic(
I, 16);
4299 case Intrinsic::x86_mmx_packssdw:
4300 handleVectorPackIntrinsic(
I, 32);
4303 case Intrinsic::x86_mmx_psad_bw:
4304 handleVectorSadIntrinsic(
I,
true);
4306 case Intrinsic::x86_sse2_psad_bw:
4307 case Intrinsic::x86_avx2_psad_bw:
4308 handleVectorSadIntrinsic(I);
break;
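// Multiply-and-add intrinsics (pmaddwd and friends) combine adjacent
// element pairs into one wider result element; any uninitialized bit in
// either source element of a pair poisons the whole result element.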
4311 case Intrinsic::x86_sse2_pmadd_wd:
4312 case Intrinsic::x86_avx2_pmadd_wd:
4313 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
4314 case Intrinsic::x86_avx2_pmadd_ub_sw:
4315 handleVectorPmaddIntrinsic(I);
break;
4318 case Intrinsic::x86_ssse3_pmadd_ub_sw:
4319 handleVectorPmaddIntrinsic(I, 8);
break;
4322 case Intrinsic::x86_mmx_pmadd_wd:
4323 handleVectorPmaddIntrinsic(I, 16);
break;
4326 case Intrinsic::x86_sse_cmp_ss:
4327 case Intrinsic::x86_sse2_cmp_sd:
4328 case Intrinsic::x86_sse_comieq_ss:
4329 case Intrinsic::x86_sse_comilt_ss:
4330 case Intrinsic::x86_sse_comile_ss:
4331 case Intrinsic::x86_sse_comigt_ss:
4332 case Intrinsic::x86_sse_comige_ss:
4333 case Intrinsic::x86_sse_comineq_ss:
4334 case Intrinsic::x86_sse_ucomieq_ss:
4335 case Intrinsic::x86_sse_ucomilt_ss:
4336 case Intrinsic::x86_sse_ucomile_ss:
4337 case Intrinsic::x86_sse_ucomigt_ss:
4338 case Intrinsic::x86_sse_ucomige_ss:
4339 case Intrinsic::x86_sse_ucomineq_ss:
4340 case Intrinsic::x86_sse2_comieq_sd:
4341 case Intrinsic::x86_sse2_comilt_sd:
4342 case Intrinsic::x86_sse2_comile_sd:
4343 case Intrinsic::x86_sse2_comigt_sd:
4344 case Intrinsic::x86_sse2_comige_sd:
4345 case Intrinsic::x86_sse2_comineq_sd:
4346 case Intrinsic::x86_sse2_ucomieq_sd:
4347 case Intrinsic::x86_sse2_ucomilt_sd:
4348 case Intrinsic::x86_sse2_ucomile_sd:
4349 case Intrinsic::x86_sse2_ucomigt_sd:
4350 case Intrinsic::x86_sse2_ucomige_sd:
4351 case Intrinsic::x86_sse2_ucomineq_sd:
4352 handleVectorCompareScalarIntrinsic(I);
break;
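// Packed FP comparisons yield an all-ones or all-zeros mask per lane, so
// the result shadow is computed lane-wise: a lane is fully poisoned if
// any shadow bit of either corresponding operand lane is set.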
4355 case Intrinsic::x86_avx_cmp_pd_256:
4356 case Intrinsic::x86_avx_cmp_ps_256:
4357 case Intrinsic::x86_sse2_cmp_pd:
4358 case Intrinsic::x86_sse_cmp_ps:
4359 handleVectorComparePackedIntrinsic(I);
break;
4362 case Intrinsic::x86_bmi_bextr_32:
4363 case Intrinsic::x86_bmi_bextr_64:
4364 case Intrinsic::x86_bmi_bzhi_32:
4365 case Intrinsic::x86_bmi_bzhi_64:
4366 case Intrinsic::x86_bmi_pdep_32:
4367 case Intrinsic::x86_bmi_pdep_64:
4368 case Intrinsic::x86_bmi_pext_32:
4369 case Intrinsic::x86_bmi_pext_64:
4370 handleBmiIntrinsic(I);
break;
4373 case Intrinsic::x86_pclmulqdq:
4374 case Intrinsic::x86_pclmulqdq_256:
4375 case Intrinsic::x86_pclmulqdq_512:
4376 handlePclmulIntrinsic(I);
break;
4379 case Intrinsic::x86_avx_round_pd_256:
4380 case Intrinsic::x86_avx_round_ps_256:
4381 case Intrinsic::x86_sse41_round_pd:
4382 case Intrinsic::x86_sse41_round_ps:
4383 handleRoundPdPsIntrinsic(I);
break;
4386 case Intrinsic::x86_sse41_round_sd:
4387 case Intrinsic::x86_sse41_round_ss:
4388 handleUnarySdSsIntrinsic(I);
break;
4391 case Intrinsic::x86_sse2_max_sd:
4392 case Intrinsic::x86_sse_max_ss:
4393 case Intrinsic::x86_sse2_min_sd:
4394 case Intrinsic::x86_sse_min_ss:
4395 handleBinarySdSsIntrinsic(I);
break;
4398 case Intrinsic::x86_avx_vtestc_pd:
4399 case Intrinsic::x86_avx_vtestc_pd_256:
4400 case Intrinsic::x86_avx_vtestc_ps:
4401 case Intrinsic::x86_avx_vtestc_ps_256:
4402 case Intrinsic::x86_avx_vtestnzc_pd:
4403 case Intrinsic::x86_avx_vtestnzc_pd_256:
4404 case Intrinsic::x86_avx_vtestnzc_ps:
4405 case Intrinsic::x86_avx_vtestnzc_ps_256:
4406 case Intrinsic::x86_avx_vtestz_pd:
4407 case Intrinsic::x86_avx_vtestz_pd_256:
4408 case Intrinsic::x86_avx_vtestz_ps:
4409 case Intrinsic::x86_avx_vtestz_ps_256:
4410 case Intrinsic::x86_avx_ptestc_256:
4411 case Intrinsic::x86_avx_ptestnzc_256:
4412 case Intrinsic::x86_avx_ptestz_256:
4413 case Intrinsic::x86_sse41_ptestc:
4414 case Intrinsic::x86_sse41_ptestnzc:
4415 case Intrinsic::x86_sse41_ptestz:
4416 handleVtestIntrinsic(I);
break;
4419 case Intrinsic::fshl:
4420 case Intrinsic::fshr:
4421 handleFunnelShift(I);
break;
4424 case Intrinsic::is_constant:
// The result of llvm.is.constant() is always defined.
4426 setShadow(&I, getCleanShadow(&I));
4427 setOrigin(&I, getCleanOrigin());
break;
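// NEON st1x{2,3,4} and st{2,3,4} write several vectors (interleaved for
// st2/st3/st4) through a single pointer; the handler mirrors each store
// into the destination's shadow (and, if enabled, origin) memory.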
4430 case Intrinsic::aarch64_neon_st1x2:
4431 case Intrinsic::aarch64_neon_st1x3:
4432 case Intrinsic::aarch64_neon_st1x4:
4433 case Intrinsic::aarch64_neon_st2:
4434 case Intrinsic::aarch64_neon_st3:
4435 case Intrinsic::aarch64_neon_st4: {
4436 handleNEONVectorStoreIntrinsic(I, false);
break;
}
4440 case Intrinsic::aarch64_neon_st2lane:
4441 case Intrinsic::aarch64_neon_st3lane:
4442 case Intrinsic::aarch64_neon_st4lane: {
4443 handleNEONVectorStoreIntrinsic(I, true);
break;
}
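// NEON table lookups (tbl/tbx) are instrumented by re-running the same
// intrinsic over the shadows of the table operands while passing the
// index operand through verbatim: an index that selects an uninitialized
// table byte thereby selects its shadow byte as well.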
4456 case Intrinsic::aarch64_neon_tbl1:
4457 case Intrinsic::aarch64_neon_tbl2:
4458 case Intrinsic::aarch64_neon_tbl3:
4459 case Intrinsic::aarch64_neon_tbl4:
4460 case Intrinsic::aarch64_neon_tbx1:
4461 case Intrinsic::aarch64_neon_tbx2:
4462 case Intrinsic::aarch64_neon_tbx3:
4463 case Intrinsic::aarch64_neon_tbx4: {
4465 handleIntrinsicByApplyingToShadow(I, /*trailingVerbatimArgs=*/1);
break;
}
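// NEON multiplies, including the polynomial (pmul/pmull) and widening
// (smull/umull) forms; shadow is propagated conservatively, so any
// uninitialized input lane poisons the corresponding output lane.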
4469 case Intrinsic::aarch64_neon_fmulx:
4470 case Intrinsic::aarch64_neon_pmul:
4471 case Intrinsic::aarch64_neon_pmull:
4472 case Intrinsic::aarch64_neon_smull:
4473 case Intrinsic::aarch64_neon_pmull64:
4474 case Intrinsic::aarch64_neon_umull: {
4475 handleNEONVectorMultiplyIntrinsic(I);
break;
}
default:
4480 if (!handleUnknownIntrinsic(I))
4481 visitInstruction(I);
break;
}
}
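// Calls into libatomic (__atomic_load / __atomic_store) bypass the usual
// load/store visitors, so they get dedicated handlers that copy shadow
// between the source and destination buffers and strengthen the memory
// ordering enough that the shadow copy cannot be reordered past the
// data copy.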
4486 void visitLibAtomicLoad(CallBase &CB) {
4488 assert(isa<CallInst>(CB));
4497 Value *NewOrdering =
IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
CB.setArgOperand(3, NewOrdering);
4501 NextNodeIRBuilder NextIRB(&CB);
4502 Value *SrcShadowPtr, *SrcOriginPtr;
4503 std::tie(SrcShadowPtr, SrcOriginPtr) =
4504 getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
/*isStore*/ false);
4506 Value *DstShadowPtr =
4507 getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
/*isStore*/ true)
.first;
4511 NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
4512 if (MS.TrackOrigins) {
4513 Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
kMinOriginAlignment);
4515 Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
4516 NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
}
}
4520 void visitLibAtomicStore(CallBase &CB) {
4527 Value *NewOrdering =
IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
CB.setArgOperand(3, NewOrdering);
4531 Value *DstShadowPtr =
getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
/*isStore*/ true)
.first;
4549 visitAsmInstruction(CB);
else
4551 visitInstruction(CB);
4560 case LibFunc_atomic_load:
4561 if (!isa<CallInst>(CB)) {
4562 llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
" Ignoring!\n";
break;
}
4566 visitLibAtomicLoad(CB);
return;
4568 case LibFunc_atomic_store:
4569 visitLibAtomicStore(CB);
return;
4576 if (auto *Call = dyn_cast<CallInst>(&CB)) {
4577 assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
4585 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
4587 Call->removeFnAttrs(B);
4589 Func->removeFnAttrs(B);
4595 bool MayCheckCall = MS.EagerChecks;
4599 MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
4602 unsigned ArgOffset = 0;
4605 if (!A->getType()->isSized()) {
4606 LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
continue;
}
4610 if (A->getType()->isScalableTy()) {
4611 LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
// Handle as noundef, but don't reserve TLS slots.
4613 insertShadowCheck(A, &CB);
continue;
}
4622 bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
4625 insertShadowCheck(A, &CB);
4626 Size = DL.getTypeAllocSize(A->getType());
4632 Value *ArgShadow = getShadow(A);
4633 Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
4635 << " Shadow: " << *ArgShadow << "\n");
4639 assert(A->getType()->isPointerTy() &&
4640 "ByVal argument is not a pointer!");
4648 Value *AShadowPtr, *AOriginPtr;
4649 std::tie(AShadowPtr, AOriginPtr) =
4650 getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ false);
4652 if (!PropagateShadow) {
4659 if (MS.TrackOrigins) {
4660 Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
4674 Size = DL.getTypeAllocSize(A->getType());
4679 Constant *Cst = dyn_cast<Constant>(ArgShadow);
4680 if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
IRB.CreateStore(getOrigin(A),
4682 getOriginPtrForArgument(IRB, ArgOffset));
}
4686 assert(Store != nullptr);
4695 if (FT->isVarArg()) {
4696 VAHelper->visitCallBase(CB, IRB);
4703 if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
4706 if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
4707 setShadow(&CB, getCleanShadow(&CB));
4708 setOrigin(&CB, getCleanOrigin());
4714 Value *Base = getShadowPtrForRetval(IRBBefore);
4715 IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
kShadowTLSAlignment);
4718 if (isa<CallInst>(CB)) {
4722 BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
4727 setShadow(&CB, getCleanShadow(&CB));
4728 setOrigin(&CB, getCleanOrigin());
4735 "Could not find insertion point for retval shadow load");
4738 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
4741 setShadow(&CB, RetvalShadow);
4742 if (MS.TrackOrigins)
4743 setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
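// musttail calls must be immediately followed by the return, so there is
// no room to load the return-value shadow after the call; the helper
// below recognizes that pattern, looking through a bitcast if present.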
4747 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
4748 RetVal = I->getOperand(0);
}
4750 if (auto *I = dyn_cast<CallInst>(RetVal)) {
4751 return I->isMustTailCall();
}
4758 Value *RetVal = I.getReturnValue();
4764 Value *ShadowPtr = getShadowPtrForRetval(IRB);
4765 bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
4766 bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
4769 bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
4771 Value *Shadow = getShadow(RetVal);
4772 bool StoreOrigin = true;
4774 insertShadowCheck(RetVal, &I);
4775 Shadow = getCleanShadow(RetVal);
4776 StoreOrigin = false;
4783 if (MS.TrackOrigins && StoreOrigin)
4784 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
4790 if (!PropagateShadow) {
4791 setShadow(&I, getCleanShadow(&I));
4792 setOrigin(&I, getCleanOrigin());
return;
}
4796 ShadowPHINodes.push_back(&I);
4797 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
"_msphi_s"));
4799 if (MS.TrackOrigins)
setOrigin(
4801 &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
4818 IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
4820 Value *ShadowBase, *OriginBase;
4821 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
&I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
4828 if (PoisonStack && MS.TrackOrigins) {
4829 Value *Idptr = getLocalVarIdptr(I);
4831 Value *Descr = getLocalVarDescription(I);
4832 IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
4833 {&I, Len, Idptr, Descr});
} else {
4835 IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
}
4841 Value *Descr = getLocalVarDescription(I);
4843 IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
} else {
4845 IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
}
4852 NextNodeIRBuilder IRB(InsPoint);
4854 TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
4856 if (I.isArrayAllocation())
4860 if (MS.CompileKernel)
4861 poisonAllocaKmsan(I, IRB, Len);
else
4863 poisonAllocaUserspace(I, IRB, Len);
4867 setShadow(&I, getCleanShadow(&I));
4868 setOrigin(&I, getCleanOrigin());
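// For 'select', the result shadow is (roughly) a select over the operand
// shadows, widened by the condition's shadow so that an uninitialized
// condition poisons any bit on which the two arms could disagree.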
4880 handleSelectLikeInst(I, B, C, D);
4886 Value *Sb = getShadow(B);
4887 Value *Sc = getShadow(C);
4888 Value *Sd = getShadow(D);
4890 Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
4891 Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
4892 Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;
4897 if (I.getType()->isAggregateType()) {
4901 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
4909 C = CreateAppToShadowCast(IRB, C);
4910 D = CreateAppToShadowCast(IRB, D);
4917 if (MS.TrackOrigins) {
4920 if (B->getType()->isVectorTy()) {
4921 B = convertToBool(B, IRB);
4922 Sb = convertToBool(Sb, IRB);
4933 setShadow(&I, getCleanShadow(&I));
4934 setOrigin(&I, getCleanOrigin());
4938 setShadow(&I, getCleanShadow(&I));
4939 setOrigin(&I, getCleanOrigin());
4943 setShadow(&I, getCleanShadow(&I));
4944 setOrigin(&I, getCleanOrigin());
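// extractvalue/insertvalue merely move sub-aggregates around, so their
// shadow is the corresponding extractvalue/insertvalue applied to the
// aggregate operand's shadow.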
4951 Value *Agg = I.getAggregateOperand();
4953 Value *AggShadow = getShadow(Agg);
4957 setShadow(&I, ResShadow);
4958 setOriginForNaryOp(I);
4964 Value *AggShadow = getShadow(I.getAggregateOperand());
4965 Value *InsShadow = getShadow(I.getInsertedValueOperand());
4971 setOriginForNaryOp(I);
4975 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
4978 errs() << "ZZZ " << I.getOpcodeName() << "\n";
4980 errs() << "QQQ " << I << "\n";
5007 insertShadowCheck(Operand, &I);
5014 auto Size = DL.getTypeStoreSize(ElemTy);
5016 if (MS.CompileKernel) {
5017 IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
5023 auto [ShadowPtr, _] =
5024 getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
5035 int NumRetOutputs = 0;
5037 Type *RetTy = cast<Value>(CB)->getType();
5038 if (!RetTy->isVoidTy()) {
5040 auto *ST = dyn_cast<StructType>(RetTy);
if (ST)
5042 NumRetOutputs = ST->getNumElements();
5048 switch (Info.Type) {
5056 return NumOutputs - NumRetOutputs;
5079 int OutputArgs = getNumOutputArgs(IA, CB);
5085 for (int i = OutputArgs; i < NumOperands; i++) {
5093 for (int i = 0; i < OutputArgs; i++) {
5099 setShadow(&I, getCleanShadow(&I));
5100 setOrigin(&I, getCleanOrigin());
5105 setShadow(&I, getCleanShadow(&I));
5106 setOrigin(&I, getCleanOrigin());
5114 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
5115 Value *Operand = I.getOperand(i);
5117 insertShadowCheck(Operand, &I);
5119 setShadow(&I, getCleanShadow(&I));
5120 setOrigin(&I, getCleanOrigin());
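// Variadic calls: shadow for arguments passed through '...' travels in a
// dedicated TLS buffer (__msan_va_arg_tls, plus __msan_va_arg_origin_tls
// and __msan_va_arg_overflow_size_tls). The helpers below fill that
// buffer at each call site and, at va_start, copy it over the shadow of
// the target-specific va_list save areas. Conceptually, for printf("%d", x):
//   va_arg_tls[offset] = shadow(x);
//   va_arg_overflow_size_tls = <bytes spilled past the register area>;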
5124struct VarArgHelperBase : public VarArgHelper {
5126 MemorySanitizer &MS;
5127 MemorySanitizerVisitor &MSV;
5129 const unsigned VAListTagSize;
5131 VarArgHelperBase(Function &F, MemorySanitizer &MS,
5132 MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
5133 : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
5137 return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5153 return getShadowPtrForVAArgument(IRB, ArgOffset);
void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
5167 unsigned BaseOffset) {
IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
5176 TailSize, Align(8));
5181 Value *VAListTag = I.getArgOperand(0);
5183 auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
5184 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
5187 VAListTagSize, Alignment, false);
5194 unpoisonVAListTagForInst(I);
5200 unpoisonVAListTagForInst(I);
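// AMD64 (System V): the va_list register save area holds six GP
// registers (48 bytes) followed by eight SSE registers (16 bytes each,
// 176 bytes total); remaining arguments spill to the overflow area on
// the stack. The shadow staged in TLS mirrors exactly that layout.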
5205struct VarArgAMD64Helper : public VarArgHelperBase {
5208 static const unsigned AMD64GpEndOffset = 48;
5209 static const unsigned AMD64FpEndOffsetSSE = 176;
5211 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
5213 unsigned AMD64FpEndOffset;
5216 Value *VAArgOverflowSize = nullptr;
5218 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5220 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
5221 MemorySanitizerVisitor &MSV)
5222 : VarArgHelperBase(F, MS, MSV, 24) {
5223 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
5224 for (const auto &Attr : F.getAttributes().getFnAttrs()) {
5225 if (Attr.isStringAttribute() &&
5226 (Attr.getKindAsString() == "target-features")) {
5227 if (Attr.getValueAsString().contains("-sse"))
5228 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
5234 ArgKind classifyArgument(Value *arg) {
5237 if (T->isX86_FP80Ty())
return AK_Memory;
5239 if (T->isFPOrFPVectorTy())
5240 return AK_FloatingPoint;
5241 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
5242 return AK_GeneralPurpose;
5243 if (T->isPointerTy())
5244 return AK_GeneralPurpose;
5257 unsigned GpOffset = 0;
5258 unsigned FpOffset = AMD64GpEndOffset;
5259 unsigned OverflowOffset = AMD64FpEndOffset;
5264 bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
5271 assert(A->getType()->isPointerTy());
5273 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
5275 unsigned BaseOffset = OverflowOffset;
5276 Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
5277 Value *OriginBase = nullptr;
5278 if (MS.TrackOrigins)
5279 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
5280 OverflowOffset += AlignedSize;
5283 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
5287 Value *ShadowPtr, *OriginPtr;
5288 std::tie(ShadowPtr, OriginPtr) =
MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
/*isStore*/ false);
5293 if (MS.TrackOrigins)
5297 ArgKind AK = classifyArgument(A);
5298 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
AK = AK_Memory;
5300 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
AK = AK_Memory;
5302 Value *ShadowBase, *OriginBase = nullptr;
5304 case AK_GeneralPurpose:
5305 ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
5306 if (MS.TrackOrigins)
5307 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
5311 case AK_FloatingPoint:
5312 ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
5313 if (MS.TrackOrigins)
5314 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5321 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5323 unsigned BaseOffset = OverflowOffset;
5324 ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
5325 if (MS.TrackOrigins) {
5326 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
5328 OverflowOffset += AlignedSize;
5331 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
5340 Value *Shadow = MSV.getShadow(A);
5342 if (MS.TrackOrigins) {
5343 Value *Origin = MSV.getOrigin(A);
5345 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
kMinOriginAlignment);
Constant *OverflowSize =
5351 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
5352 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5355 void finalizeInstrumentation() override {
5356 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5357 "finalizeInstrumentation called twice");
5358 if (!VAStartInstrumentationList.empty()) {
Value *CopySize = IRB.CreateAdd(
5365 ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
Value *SrcSize = IRB.CreateBinaryIntrinsic(
5372 Intrinsic::umin, CopySize,
ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5376 if (MS.TrackOrigins) {
5386 for (CallInst *OrigInst : VAStartInstrumentationList) {
5387 NextNodeIRBuilder IRB(OrigInst);
5388 Value *VAListTag = OrigInst->getArgOperand(0);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5392 ConstantInt::get(MS.IntptrTy, 16)),
MS.PtrTy);
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
5395 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5397 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5398 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
5400 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
AMD64FpEndOffset);
5402 if (MS.TrackOrigins)
5403 IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5404 Alignment, AMD64FpEndOffset);
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5407 ConstantInt::get(MS.IntptrTy, 8)),
MS.PtrTy);
5409 Value *OverflowArgAreaPtr =
5410 IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
5411 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
5412 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
5413 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
5417 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
VAArgOverflowSize);
5419 if (MS.TrackOrigins) {
5422 IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
VAArgOverflowSize);
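// AArch64 AAPCS: up to eight GP registers (64 bytes) and eight SIMD/FP
// registers (128 bytes) can carry variadic arguments; __va_list stores
// offsets relative to the 'top' of each register save area, which is why
// the copy below adds the (negative) offsets to the top pointers.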
5430struct VarArgAArch64Helper : public VarArgHelperBase {
5431 static const unsigned kAArch64GrArgSize = 64;
5432 static const unsigned kAArch64VrArgSize = 128;
5434 static const unsigned AArch64GrBegOffset = 0;
5435 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
5437 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
5438 static const unsigned AArch64VrEndOffset =
5439 AArch64VrBegOffset + kAArch64VrArgSize;
5440 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
5443 Value *VAArgOverflowSize = nullptr;
5445 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5447 VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
5448 MemorySanitizerVisitor &MSV)
5449 : VarArgHelperBase(F, MS, MSV, 32) {}
5452 std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
5453 if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
5454 return {AK_GeneralPurpose, 1};
5455 if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
5456 return {AK_FloatingPoint, 1};
5458 if (T->isArrayTy()) {
5459 auto R = classifyArgument(T->getArrayElementType());
5460 R.second *= T->getScalarType()->getArrayNumElements();
5465 auto R = classifyArgument(FV->getScalarType());
5466 R.second *= FV->getNumElements();
5471 return {AK_Memory, 0};
5484 unsigned GrOffset = AArch64GrBegOffset;
5485 unsigned VrOffset = AArch64VrBegOffset;
5486 unsigned OverflowOffset = AArch64VAEndOffset;
5491 auto [AK, RegNum] = classifyArgument(A->getType());
5492 if (AK == AK_GeneralPurpose &&
5493 (GrOffset + RegNum * 8) > AArch64GrEndOffset)
AK = AK_Memory;
5495 if (AK == AK_FloatingPoint &&
5496 (VrOffset + RegNum * 16) > AArch64VrEndOffset)
AK = AK_Memory;
5500 case AK_GeneralPurpose:
5501 Base = getShadowPtrForVAArgument(IRB, GrOffset);
5502 GrOffset += 8 * RegNum;
break;
5504 case AK_FloatingPoint:
5505 Base = getShadowPtrForVAArgument(IRB, VrOffset);
5506 VrOffset += 16 * RegNum;
break;
5513 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5515 unsigned BaseOffset = OverflowOffset;
5516 Base = getShadowPtrForVAArgument(IRB, BaseOffset);
5517 OverflowOffset += AlignedSize;
5520 CleanUnusedTLS(IRB, Base, BaseOffset);
Constant *OverflowSize =
5532 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
5533 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5540 ConstantInt::get(MS.IntptrTy, offset)),
MS.PtrTy);
return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
Value *SaveAreaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5549 ConstantInt::get(MS.IntptrTy, offset)),
MS.PtrTy);
Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
5552 return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
5555 void finalizeInstrumentation() override {
5556 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5557 "finalizeInstrumentation called twice");
5558 if (!VAStartInstrumentationList.empty()) {
Value *CopySize = IRB.CreateAdd(
5565 ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
Value *SrcSize = IRB.CreateBinaryIntrinsic(
5572 Intrinsic::umin, CopySize,
ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5578 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
5579 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
5583 for (CallInst *OrigInst : VAStartInstrumentationList) {
5584 NextNodeIRBuilder IRB(OrigInst);
5586 Value *VAListTag = OrigInst->getArgOperand(0);
5603 Value *StackSaveAreaPtr =
5604 IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
5607 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
5608 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
5611 IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
5614 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
5615 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
5618 IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
5624 Value *GrRegSaveAreaShadowPtrOff =
5625 IRB.CreateAdd(GrArgSize, GrOffSaveArea);
5627 Value *GrRegSaveAreaShadowPtr =
5628 MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Align(8), /*isStore*/ true)
.first;
5634 Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
5640 Value *VrRegSaveAreaShadowPtrOff =
5641 IRB.CreateAdd(VrArgSize, VrOffSaveArea);
5643 Value *VrRegSaveAreaShadowPtr =
5644 MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Align(8), /*isStore*/ true)
.first;
Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
IRB.getInt32(AArch64VrBegOffset)),
5651 VrRegSaveAreaShadowPtrOff);
5652 Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
5658 Value *StackSaveAreaShadowPtr =
5659 MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
Align(16), /*isStore*/ true)
.first;
Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
5664 VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
5667 Align(16), VAArgOverflowSize);
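// PowerPC lays variadic arguments out linearly, so the helper only has
// to track the total size and copy one contiguous run of shadow at
// va_start; the va_list tag size differs between the 32-bit (12-byte)
// and 64-bit (8-byte) ABIs (see CreateVarArgHelper below).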
5673struct VarArgPowerPCHelper : public VarArgHelperBase {
5675 Value *VAArgSize = nullptr;
5677 VarArgPowerPCHelper(Function &F, MemorySanitizer &MS,
5678 MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
5679 : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
5689 Triple TargetTriple(F.getParent()->getTargetTriple());
5693 if (TargetTriple.isPPC64()) {
5694 if (TargetTriple.isPPC64ELFv2ABI())
5702 unsigned VAArgOffset = VAArgBase;
5706 bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
5708 assert(A->getType()->isPointerTy());
5710 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
5713 ArgAlign = Align(8);
5714 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
Value *Base =
5717 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
5719 Value *AShadowPtr, *AOriginPtr;
5720 std::tie(AShadowPtr, AOriginPtr) =
5721 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
kShadowTLSAlignment, /*isStore*/ false);
5731 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5733 if (A->getType()->isArrayTy()) {
5736 Type *ElementTy = A->getType()->getArrayElementType();
5738 ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
5739 } else if (A->getType()->isVectorTy()) {
5741 ArgAlign = Align(ArgSize);
if (ArgAlign < 8)
5744 ArgAlign = Align(8);
5745 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
5746 if (DL.isBigEndian()) {
5750 VAArgOffset += (8 - ArgSize);
Value *Base =
5754 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
5758 VAArgOffset += ArgSize;
5762 VAArgBase = VAArgOffset;
Constant *TotalVAArgSize =
5766 ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
5769 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5772 void finalizeInstrumentation() override {
5773 assert(!VAArgSize && !VAArgTLSCopy &&
5774 "finalizeInstrumentation called twice");
5777 Value *CopySize = VAArgSize;
5779 if (!VAStartInstrumentationList.empty()) {
Value *SrcSize = IRB.CreateBinaryIntrinsic(
5789 Intrinsic::umin, CopySize,
ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5797 Triple TargetTriple(F.getParent()->getTargetTriple());
5798 for (CallInst *OrigInst : VAStartInstrumentationList) {
5799 NextNodeIRBuilder IRB(OrigInst);
5800 Value *VAListTag = OrigInst->getArgOperand(0);
5804 if (!TargetTriple.isPPC64()) {
RegSaveAreaPtrPtr =
5806 IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
5808 RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
5811 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5813 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
5815 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5816 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
5818 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
CopySize);
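// SystemZ: one 160-byte register save area holds both the GP argument
// registers (offsets 16..56) and the FP ones (offsets 128..160).
// Sub-8-byte arguments occupy only part of their slot, so the
// ShadowExtension logic below zero- or sign-extends the shadow to match
// how the ABI extends the value itself.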
5825struct VarArgSystemZHelper : public VarArgHelperBase {
5826 static const unsigned SystemZGpOffset = 16;
5827 static const unsigned SystemZGpEndOffset = 56;
5828 static const unsigned SystemZFpOffset = 128;
5829 static const unsigned SystemZFpEndOffset = 160;
5830 static const unsigned SystemZMaxVrArgs = 8;
5831 static const unsigned SystemZRegSaveAreaSize = 160;
5832 static const unsigned SystemZOverflowOffset = 160;
5833 static const unsigned SystemZVAListTagSize = 32;
5834 static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
5835 static const unsigned SystemZRegSaveAreaPtrOffset = 24;
5837 bool IsSoftFloatABI;
5840 Value *VAArgOverflowSize = nullptr;
5842 enum class ArgKind {
5850 enum class ShadowExtension { None, Zero, Sign };
5852 VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
5853 MemorySanitizerVisitor &MSV)
5854 : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
5855 IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}
5857 ArgKind classifyArgument(Type *T) {
5864 if (T->isIntegerTy(128) || T->isFP128Ty())
5865 return ArgKind::Indirect;
5866 if (T->isFloatingPointTy())
5867 return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
5868 if (T->isIntegerTy() || T->isPointerTy())
5869 return ArgKind::GeneralPurpose;
5870 if (T->isVectorTy())
5871 return ArgKind::Vector;
5872 return ArgKind::Memory;
5875 ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
5885 return ShadowExtension::Zero;
5889 return ShadowExtension::Sign;
5891 return ShadowExtension::None;
5895 unsigned GpOffset = SystemZGpOffset;
5896 unsigned FpOffset = SystemZFpOffset;
5897 unsigned VrIndex = 0;
5898 unsigned OverflowOffset = SystemZOverflowOffset;
5905 ArgKind AK = classifyArgument(T);
5906 if (AK == ArgKind::Indirect) {
5908 AK = ArgKind::GeneralPurpose;
5910 if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
5911 AK = ArgKind::Memory;
5912 if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
5913 AK = ArgKind::Memory;
5914 if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
5915 AK = ArgKind::Memory;
5916 Value *ShadowBase = nullptr;
5917 Value *OriginBase = nullptr;
5918 ShadowExtension SE = ShadowExtension::None;
5920 case ArgKind::GeneralPurpose: {
5925 SE = getShadowExtension(CB, ArgNo);
5927 if (SE == ShadowExtension::None) {
5929 assert(ArgAllocSize <= ArgSize);
5930 GapSize = ArgSize - ArgAllocSize;
5932 ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
5933 if (MS.TrackOrigins)
5934 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
5936 GpOffset += ArgSize;
5942 case ArgKind::FloatingPoint: {
5951 ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
5952 if (MS.TrackOrigins)
5953 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5955 FpOffset += ArgSize;
5961 case ArgKind::Vector: {
5968 case ArgKind::Memory: {
5976 SE = getShadowExtension(CB, ArgNo);
5978 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
5980 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
5981 if (MS.TrackOrigins)
5983 getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
5984 OverflowOffset += ArgSize;
5991 case ArgKind::Indirect:
5994 if (ShadowBase == nullptr)
continue;
5996 Value *Shadow = MSV.getShadow(A);
5997 if (SE != ShadowExtension::None)
5998 Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
5999 SE == ShadowExtension::Sign);
6000 ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
6002 if (MS.TrackOrigins) {
6003 Value *Origin = MSV.getOrigin(A);
6005 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
kMinOriginAlignment);
6009 Constant *OverflowSize = ConstantInt::get(
6010 IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
6011 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6018 ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
MS.PtrTy);
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
6021 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6023 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6024 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ true);
6029 unsigned RegSaveAreaSize =
6030 IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
6031 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
RegSaveAreaSize);
6033 if (MS.TrackOrigins)
6034 IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
6035 Alignment, RegSaveAreaSize);
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6044 ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
MS.PtrTy);
6046 Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
6047 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
6049 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
6050 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
6053 SystemZOverflowOffset);
6054 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
VAArgOverflowSize);
6056 if (MS.TrackOrigins) {
SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
6058 SystemZOverflowOffset);
6059 IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
VAArgOverflowSize);
6064 void finalizeInstrumentation() override {
6065 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
6066 "finalizeInstrumentation called twice");
6067 if (!VAStartInstrumentationList.empty()) {
Value *CopySize =
6074 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
VAArgOverflowSize);
Value *SrcSize = IRB.CreateBinaryIntrinsic(
6082 Intrinsic::umin, CopySize,
ConstantInt::get(MS.IntptrTy, kParamTLSSize));
6086 if (MS.TrackOrigins) {
6096 for (CallInst *OrigInst : VAStartInstrumentationList) {
6097 NextNodeIRBuilder IRB(OrigInst);
6098 Value *VAListTag = OrigInst->getArgOperand(0);
6099 copyRegSaveArea(IRB, VAListTag);
6100 copyOverflowArea(IRB, VAListTag);
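// i386 has no register save area: every variadic argument lives on the
// stack, so the helper only records the total size and copies a single
// contiguous run of shadow at va_start.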
6106struct VarArgI386Helper : public VarArgHelperBase {
6108 Value *VAArgSize = nullptr;
6110 VarArgI386Helper(Function &F, MemorySanitizer &MS,
6111 MemorySanitizerVisitor &MSV)
6112 : VarArgHelperBase(F, MS, MSV, 4) {}
6116 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6117 unsigned VAArgOffset = 0;
6120 bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
6122 assert(A->getType()->isPointerTy());
6124 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
6126 if (ArgAlign < IntptrSize)
6127 ArgAlign = Align(IntptrSize);
6128 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
6130 Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6132 Value *AShadowPtr, *AOriginPtr;
6133 std::tie(AShadowPtr, AOriginPtr) =
6134 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
kShadowTLSAlignment, /*isStore*/ false);
6144 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
6146 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
6147 if (DL.isBigEndian()) {
6150 if (ArgSize < IntptrSize)
6151 VAArgOffset += (IntptrSize - ArgSize);
6154 Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6157 VAArgOffset += ArgSize;
6163 Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
6166 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
6169 void finalizeInstrumentation() override {
6170 assert(!VAArgSize && !VAArgTLSCopy &&
6171 "finalizeInstrumentation called twice");
6174 Value *CopySize = VAArgSize;
6176 if (!VAStartInstrumentationList.empty()) {
Value *SrcSize = IRB.CreateBinaryIntrinsic(
6185 Intrinsic::umin, CopySize,
ConstantInt::get(MS.IntptrTy, kParamTLSSize));
6193 for (CallInst *OrigInst : VAStartInstrumentationList) {
6194 NextNodeIRBuilder IRB(OrigInst);
6195 Value *VAListTag = OrigInst->getArgOperand(0);
6196 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
6197 Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6199 PointerType::get(*MS.C, 0));
6200 Value *RegSaveAreaPtr =
6201 IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
6202 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6204 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6206 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6207 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
6209 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
CopySize);
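// Generic fallback shared by ARM32, RISC-V, MIPS and LoongArch (see the
// aliases below): arguments are assumed to be laid out linearly and
// aligned to the native pointer size, as on i386.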
6217struct VarArgGenericHelper : public VarArgHelperBase {
6219 Value *VAArgSize = nullptr;
6221 VarArgGenericHelper(Function &F, MemorySanitizer &MS,
6222 MemorySanitizerVisitor &MSV, const unsigned VAListTagSize)
6223 : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
6226 unsigned VAArgOffset = 0;
6228 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6233 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
6234 if (DL.isBigEndian()) {
6237 if (ArgSize < IntptrSize)
6238 VAArgOffset += (IntptrSize - ArgSize);
6240 Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6241 VAArgOffset += ArgSize;
6242 VAArgOffset = alignTo(VAArgOffset, IntptrSize);
6248 Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
6251 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
6254 void finalizeInstrumentation() override {
6255 assert(!VAArgSize && !VAArgTLSCopy &&
6256 "finalizeInstrumentation called twice");
6259 Value *CopySize = VAArgSize;
6261 if (!VAStartInstrumentationList.empty()) {
Value *SrcSize = IRB.CreateBinaryIntrinsic(
6270 Intrinsic::umin, CopySize,
ConstantInt::get(MS.IntptrTy, kParamTLSSize));
6278 for (CallInst *OrigInst : VAStartInstrumentationList) {
6279 NextNodeIRBuilder IRB(OrigInst);
6280 Value *VAListTag = OrigInst->getArgOperand(0);
6281 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
6282 Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6284 PointerType::get(*MS.C, 0));
6285 Value *RegSaveAreaPtr =
6286 IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
6287 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6289 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6291 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6292 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
6294 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
CopySize);
6302using VarArgARM32Helper = VarArgGenericHelper;
6303using VarArgRISCVHelper = VarArgGenericHelper;
6304using VarArgMIPSHelper = VarArgGenericHelper;
6305using VarArgLoongArch64Helper = VarArgGenericHelper;
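// Targets without a dedicated helper fall back to VarArgNoOpHelper,
// which performs no va_arg shadow propagation at all.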
6308struct VarArgNoOpHelper : public VarArgHelper {
6309 VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
6310 MemorySanitizerVisitor &MSV) {}
6318 void finalizeInstrumentation() override {}
6324 MemorySanitizerVisitor &Visitor) {
6327 Triple TargetTriple(Func.getParent()->getTargetTriple());
6330 return new VarArgI386Helper(Func, Msan, Visitor);
6333 return new VarArgAMD64Helper(Func, Msan, Visitor);
6335 if (TargetTriple.isARM())
6336 return new VarArgARM32Helper(Func, Msan, Visitor, 4);
6339 return new VarArgAArch64Helper(Func, Msan, Visitor);
6342 return new VarArgSystemZHelper(Func, Msan, Visitor);
6347 return new VarArgPowerPCHelper(Func, Msan, Visitor, 12);
6350 return new VarArgPowerPCHelper(Func, Msan, Visitor, 8);
6353 return new VarArgRISCVHelper(Func, Msan, Visitor, 4);
6356 return new VarArgRISCVHelper(Func, Msan, Visitor, 8);
6359 return new VarArgMIPSHelper(Func, Msan, Visitor, 4);
6362 return new VarArgMIPSHelper(Func, Msan, Visitor, 8);
6365 return new VarArgLoongArch64Helper(Func, Msan, Visitor,
/*VAListTagSize=*/8);
6368 return new VarArgNoOpHelper(Func, Msan, Visitor);
6375 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
return false;
6378 MemorySanitizerVisitor Visitor(F, *this, TLI);
6382 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
6385 return Visitor.runOnFunction();
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isStore(int Opcode)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static const size_t kNumberOfAccessSizes
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
static bool isAMustTailRetVal(Value *RetVal)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Module.h This file contains the declarations for the Module class.
static const MemoryMapParams Linux_LoongArch64_MemoryMapParams
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams
static const Align kMinOriginAlignment
static const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< uint64_t > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
static cl::opt< uint64_t > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
static const unsigned kOriginSize
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
static const Align kShadowTLSAlignment
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
static cl::opt< bool > ClDumpStrictIntrinsics("msan-dump-strict-intrinsics", cl::desc("Prints 'unknown' intrinsics that were handled heuristically. " "Use -msan-dump-strict-instructions to print intrinsics that " "could not be handled exactly nor heuristically."), cl::Hidden, cl::init(false))
static const MemoryMapParams Linux_S390X_MemoryMapParams
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_I386_MemoryMapParams
const char kMsanInitName[]
static cl::opt< bool > ClPrintStackNames("msan-print-stack-names", cl::desc("Print name of local stack variable"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< uint64_t > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleLifetimeIntrinsics("msan-handle-lifetime-intrinsics", cl::desc("when possible, poison scoped variables at the beginning of the scope " "(slower, but more precise)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
static GlobalVariable * createPrivateConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClEagerChecks("msan-eager-checks", cl::desc("check arguments and return values at function call boundaries"), cl::Hidden, cl::init(false))
static cl::opt< int > ClDisambiguateWarning("msan-disambiguate-warning-threshold", cl::desc("Define threshold for number of checks per " "debug location to force origin update."), cl::Hidden, cl::init(3))
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
static cl::opt< uint64_t > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams
static const unsigned kParamTLSSize
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
static const unsigned kRetvalTLSSize
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams
const char kMsanModuleCtorName[]
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDisableChecks("msan-disable-checks", cl::desc("Apply no_sanitize to the whole file"), cl::Hidden, cl::init(false))
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
an instruction to allocate memory on the stack
void setAlignment(Align Align)
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
This class represents a no-op cast from one type to another.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_SGT
signed greater than
@ ICMP_SGE
signed greater or equal
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static bool shouldExecute(unsigned CounterName)
This instruction compares its operands according to the predicate given to the constructor.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setComdat(Comdat *C)
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ ExternalLinkage
Externally visible function.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
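
As a rough sketch of how the builder calls listed above chain together, the following hypothetical helper applies an and/xor transform to an address, the general shape of a shadow-address computation; shadowAddr, AndMask, XorMask, and the 64-bit pointer width are all assumptions of the example, not part of this file.

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    static Value *shadowAddr(IRBuilder<> &IRB, Value *Addr,
                             uint64_t AndMask, uint64_t XorMask) {
      // PtrToInt -> And -> Xor -> IntToPtr, assuming 64-bit pointers.
      Value *I = IRB.CreatePtrToInt(Addr, IRB.getInt64Ty(), "addr_int");
      I = IRB.CreateAnd(I, IRB.getInt64(AndMask), "masked");
      I = IRB.CreateXor(I, IRB.getInt64(XorMask), "xored");
      return IRB.CreateIntToPtr(I, IRB.getPtrTy(), "shadow_ptr");
    }
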
std::vector< ConstraintInfo > ConstraintInfoVector
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR unit.
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field of array element value into an aggregate value.
Base class for instruction visitors.
void visit(Iterator Start, Iterator End)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This class represents a cast from an integer to a pointer.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handling.
An instruction for reading from memory.
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
This class wraps the llvm.memcpy intrinsic.
This class wraps the llvm.memmove intrinsic.
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior when operating on undefined bits.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
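
A minimal sketch of the idiom these factories support in a new-pass-manager pass; ExamplePass and its body are hypothetical:

    #include "llvm/IR/Module.h"
    #include "llvm/IR/PassManager.h"
    using namespace llvm;

    struct ExamplePass : PassInfoMixin<ExamplePass> {
      PreservedAnalyses run(Module &M, ModuleAnalysisManager &) {
        bool Changed = false;
        // ... mutate M here, setting Changed accordingly ...
        return Changed ? PreservedAnalyses::none()  // conservatively drop all
                       : PreservedAnalyses::all();  // nothing was touched
      }
    };
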
This class represents a cast from a pointer to an integer.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
bool remove(const value_type &X)
Remove an item from the set vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
A SetVector that performs no allocations if smaller than a certain size.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
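
A small sketch of the worklist idiom these containers enable, assuming a hypothetical traversal; note that SmallPtrSet::insert reports via .second whether the element was new:

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    static void walkUsers(Value *Root) {
      SmallVector<Value *, 8> Worklist;  // inline storage for 8 elements
      SmallPtrSet<Value *, 8> Visited;
      Worklist.push_back(Root);
      while (!Worklist.empty()) {
        Value *V = Worklist.pop_back_val();
        if (!Visited.insert(V).second)   // false: already seen, skip
          continue;
        for (User *U : V->users())
          Worklist.push_back(U);
      }
    }
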
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
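
For example, a literal two-field struct could be created as follows (a sketch; makePairTy and the particular field types are illustrative):

    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    static StructType *makePairTy(LLVMContext &Ctx) {
      // A literal (unnamed, non-packed) struct: { ptr, i32 }.
      return StructType::get(Ctx, {PointerType::get(Ctx, /*AddrSpace=*/0),
                                   Type::getInt32Ty(Ctx)});
    }
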
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
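
A sketch of the usual lookup pattern, assuming a TargetLibraryInfo reference is available; isRecognizedMemcpy is a hypothetical helper:

    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    static bool isRecognizedMemcpy(const Function &F,
                                   const TargetLibraryInfo &TLI) {
      LibFunc LF;
      // Map the symbol name to a LibFunc id, then confirm the target
      // actually provides that library function.
      return TLI.getLibFunc(F.getName(), LF) && TLI.has(LF) &&
             LF == LibFunc_memcpy;
    }
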
Triple - Helper class for working with autoconf configuration names.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
bool isRISCV32() const
Tests whether the target is 32-bit RISC-V.
bool isPPC32() const
Tests whether the target is 32-bit PowerPC (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isPPC64() const
Tests whether the target is 64-bit PowerPC (little and big endian).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
bool isSystemZ() const
Tests whether the target is SystemZ.
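
A sketch of how such predicates typically gate per-target choices; is64BitTarget is a hypothetical helper, and the header path assumes a recent LLVM where Triple lives in TargetParser:

    #include "llvm/TargetParser/Triple.h"
    using namespace llvm;

    static bool is64BitTarget(const Triple &T) {
      // Illustrative grouping of the 64-bit targets queried above.
      return T.isAArch64() || T.isMIPS64() || T.isPPC64() ||
             T.isRISCV64() || T.isLoongArch64() || T.isSystemZ();
    }
    // e.g. is64BitTarget(Triple("aarch64-unknown-linux-gnu")) == true
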
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
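
Combining a few of these queries, a sketch (scalarIntBits is illustrative):

    #include "llvm/IR/Type.h"
    using namespace llvm;

    static unsigned scalarIntBits(Type *Ty) {
      // For a vector, look through to the element type; otherwise use Ty.
      Type *Elt = Ty->getScalarType();
      return Elt->isIntegerTy() ? Elt->getIntegerBitWidth() : 0;
    }
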
'undef' values are things that do not have specified contents.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This represents the llvm.va_copy intrinsic.
This represents the llvm.va_start intrinsic.
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
StringRef getName() const
Return a constant reference to the value's name.
Type * getElementType() const
This class represents zero extension of integer types.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
initializer< Ty > init(const Ty &Val)
Function * Kernel
Summary of a kernel (=entry point for target offloading).
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
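
A few worked values, matching the convention stated above:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    static void log2CeilExamples() {
      assert(llvm::Log2_32_Ceil(1) == 0);  // 2^0 >= 1
      assert(llvm::Log2_32_Ceil(9) == 4);  // 2^3 = 8 < 9 <= 16 = 2^4
      assert(llvm::Log2_32_Ceil(0) == 32); // zero maps to 32 by convention
    }
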
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns the unique alloca the value comes from, or nullptr.
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Or
Bitwise or logical OR of integers.
std::pair< Instruction *, Value * > SplitBlockAndInsertSimpleForLoop(Value *End, Instruction *SplitBefore)
Insert a for (int i = 0; i < End; i++) loop structure (with the exception that End is assumed > 0, so the loop body always executes at least once) at SplitBefore.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
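
Two worked values:

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    static void alignToExamples() {
      assert(llvm::alignTo(13, llvm::Align(8)) == 16); // round up to next multiple
      assert(llvm::alignTo(16, llvm::Align(8)) == 16); // already a multiple
    }
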
constexpr unsigned BitWidth
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
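
A sketch of how these two utilities combine to register a sanitizer constructor; the ctor/init names follow the usual MSan convention but are assumptions of the example:

    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/ModuleUtils.h"
    using namespace llvm;

    static void installSanitizerCtor(Module &M) {
      // Create (or reuse) a module constructor calling the init function,
      // then hook it into llvm.global_ctors at the given priority.
      getOrCreateSanitizerCtorAndInitFunctions(
          M, /*CtorName=*/"msan.module_ctor", /*InitName=*/"__msan_init",
          /*InitArgTypes=*/{}, /*InitArgs=*/{},
          [&](Function *Ctor, FunctionCallee) {
            appendToGlobalCtors(M, Ctor, /*Priority=*/0);
          });
    }
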
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
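
A sketch of the cold-check pattern this enables, as sanitizers typically use it; emitColdCheck, Cond, and ReportFn are illustrative, and the unlikely weights come from the MDBuilder helper listed earlier:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/MDBuilder.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    static void emitColdCheck(IRBuilder<> &IRB, Value *Cond,
                              FunctionCallee ReportFn) {
      // Branch on Cond at the current insert point; the new "then" block
      // is biased unlikely, and control rejoins the split-off tail.
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(
          Cond, IRB.GetInsertPoint(), /*Unreachable=*/false,
          MDBuilder(IRB.getContext()).createUnlikelyBranchWeights());
      IRB.SetInsertPoint(ThenTerm);
      IRB.CreateCall(ReportFn);
    }
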
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module already has the flag attached; if not, add the flag.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A CRTP mix-in to automatically provide informational APIs needed for passes.