184#include "llvm/IR/IntrinsicsAArch64.h"
185#include "llvm/IR/IntrinsicsX86.h"
216#define DEBUG_TYPE "msan"
219 "Controls which checks to insert");
222 "Controls which instruction to instrument");
240 "msan-track-origins",
245 cl::desc(
"keep going after reporting a UMR"),
254 "msan-poison-stack-with-call",
259 "msan-poison-stack-pattern",
260 cl::desc(
"poison uninitialized stack variables with the given pattern"),
265 cl::desc(
"Print name of local stack variable"),
270 cl::desc(
"Poison fully undef temporary values. "
271 "Partially undefined constant vectors "
272 "are unaffected by this flag (see "
273 "-msan-poison-undef-vectors)."),
277 "msan-poison-undef-vectors",
278 cl::desc(
"Precisely poison partially undefined constant vectors. "
279 "If false (legacy behavior), the entire vector is "
280 "considered fully initialized, which may lead to false "
281 "negatives. Fully undefined constant vectors are "
282 "unaffected by this flag (see -msan-poison-undef)."),
286 "msan-precise-disjoint-or",
287 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
288 "disjointedness is ignored (i.e., 1|1 is initialized)."),
293 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
298 cl::desc(
"exact handling of relational integer ICmp"),
302 "msan-handle-lifetime-intrinsics",
304 "when possible, poison scoped variables at the beginning of the scope "
305 "(slower, but more precise)"),
316 "msan-handle-asm-conservative",
327 "msan-check-access-address",
328 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
333 cl::desc(
"check arguments and return values at function call boundaries"),
337 "msan-dump-strict-instructions",
338 cl::desc(
"print out instructions with default strict semantics i.e.,"
339 "check that all the inputs are fully initialized, and mark "
340 "the output as fully initialized. These semantics are applied "
341 "to instructions that could not be handled explicitly nor "
350 "msan-dump-heuristic-instructions",
351 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
352 "Use -msan-dump-strict-instructions to print instructions that "
353 "could not be handled explicitly nor heuristically."),
357 "msan-instrumentation-with-call-threshold",
359 "If the function being instrumented requires more than "
360 "this number of checks and origin stores, use callbacks instead of "
361 "inline checks (-1 means never use callbacks)."),
366 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
376 cl::desc(
"Insert checks for constant shadow values"),
383 cl::desc(
"Place MSan constructors in comdat sections"),
389 cl::desc(
"Define custom MSan AndMask"),
393 cl::desc(
"Define custom MSan XorMask"),
397 cl::desc(
"Define custom MSan ShadowBase"),
401 cl::desc(
"Define custom MSan OriginBase"),
406 cl::desc(
"Define threshold for number of checks per "
407 "debug location to force origin update."),
419struct MemoryMapParams {
426struct PlatformMemoryMapParams {
427 const MemoryMapParams *bits32;
428 const MemoryMapParams *bits64;
590class MemorySanitizer {
599 MemorySanitizer(MemorySanitizer &&) =
delete;
600 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
601 MemorySanitizer(
const MemorySanitizer &) =
delete;
602 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
604 bool sanitizeFunction(Function &
F, TargetLibraryInfo &TLI);
607 friend struct MemorySanitizerVisitor;
608 friend struct VarArgHelperBase;
609 friend struct VarArgAMD64Helper;
610 friend struct VarArgAArch64Helper;
611 friend struct VarArgPowerPC64Helper;
612 friend struct VarArgPowerPC32Helper;
613 friend struct VarArgSystemZHelper;
614 friend struct VarArgI386Helper;
615 friend struct VarArgGenericHelper;
617 void initializeModule(
Module &M);
618 void initializeCallbacks(
Module &M,
const TargetLibraryInfo &TLI);
619 void createKernelApi(
Module &M,
const TargetLibraryInfo &TLI);
620 void createUserspaceApi(
Module &M,
const TargetLibraryInfo &TLI);
622 template <
typename... ArgsTy>
623 FunctionCallee getOrInsertMsanMetadataFunction(
Module &M, StringRef Name,
649 Value *ParamOriginTLS;
655 Value *RetvalOriginTLS;
661 Value *VAArgOriginTLS;
664 Value *VAArgOverflowSizeTLS;
667 bool CallbacksInitialized =
false;
670 FunctionCallee WarningFn;
674 FunctionCallee MaybeWarningVarSizeFn;
679 FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
681 FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
684 FunctionCallee MsanPoisonStackFn;
688 FunctionCallee MsanChainOriginFn;
691 FunctionCallee MsanSetOriginFn;
694 FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
697 StructType *MsanContextStateTy;
698 FunctionCallee MsanGetContextStateFn;
701 FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
707 FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
708 FunctionCallee MsanMetadataPtrForLoad_1_8[4];
709 FunctionCallee MsanMetadataPtrForStore_1_8[4];
710 FunctionCallee MsanInstrumentAsmStoreFn;
713 Value *MsanMetadataAlloca;
716 FunctionCallee getKmsanShadowOriginAccessFn(
bool isStore,
int size);
719 const MemoryMapParams *MapParams;
723 MemoryMapParams CustomMapParams;
725 MDNode *ColdCallWeights;
728 MDNode *OriginStoreWeights;
731void insertModuleCtor(
Module &M) {
768 if (!Options.Kernel) {
777 MemorySanitizer Msan(*
F.getParent(), Options);
796 OS, MapClassName2PassName);
802 if (Options.EagerChecks)
803 OS <<
"eager-checks;";
804 OS <<
"track-origins=" << Options.TrackOrigins;
820template <
typename... ArgsTy>
822MemorySanitizer::getOrInsertMsanMetadataFunction(
Module &M,
StringRef Name,
827 std::forward<ArgsTy>(Args)...);
830 return M.getOrInsertFunction(Name, MsanMetadata,
831 std::forward<ArgsTy>(Args)...);
840 RetvalOriginTLS =
nullptr;
842 ParamOriginTLS =
nullptr;
844 VAArgOriginTLS =
nullptr;
845 VAArgOverflowSizeTLS =
nullptr;
847 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
849 IRB.getVoidTy(), IRB.getInt32Ty());
860 MsanGetContextStateFn =
861 M.getOrInsertFunction(
"__msan_get_context_state", PtrTy);
865 for (
int ind = 0,
size = 1; ind < 4; ind++,
size <<= 1) {
866 std::string name_load =
867 "__msan_metadata_ptr_for_load_" + std::to_string(
size);
868 std::string name_store =
869 "__msan_metadata_ptr_for_store_" + std::to_string(
size);
870 MsanMetadataPtrForLoad_1_8[ind] =
871 getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
872 MsanMetadataPtrForStore_1_8[ind] =
873 getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
876 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
877 M,
"__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
878 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
879 M,
"__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);
882 MsanPoisonAllocaFn =
M.getOrInsertFunction(
883 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
884 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
885 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
889 return M.getOrInsertGlobal(Name, Ty, [&] {
891 nullptr, Name,
nullptr,
897void MemorySanitizer::createUserspaceApi(
Module &M,
905 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
906 :
"__msan_warning_with_origin_noreturn";
907 WarningFn =
M.getOrInsertFunction(WarningFnName,
909 IRB.getVoidTy(), IRB.getInt32Ty());
912 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
913 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
940 IRB.getIntPtrTy(
M.getDataLayout()));
944 unsigned AccessSize = 1 << AccessSizeIndex;
945 std::string FunctionName =
"__msan_maybe_warning_" +
itostr(AccessSize);
946 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
948 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
949 MaybeWarningVarSizeFn =
M.getOrInsertFunction(
950 "__msan_maybe_warning_N", TLI.
getAttrList(
C, {},
false),
951 IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
952 FunctionName =
"__msan_maybe_store_origin_" +
itostr(AccessSize);
953 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
955 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
959 MsanSetAllocaOriginWithDescriptionFn =
960 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
961 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
962 MsanSetAllocaOriginNoDescriptionFn =
963 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
964 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
965 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
966 IRB.getVoidTy(), PtrTy, IntptrTy);
970void MemorySanitizer::initializeCallbacks(
Module &M,
973 if (CallbacksInitialized)
979 MsanChainOriginFn =
M.getOrInsertFunction(
980 "__msan_chain_origin",
983 MsanSetOriginFn =
M.getOrInsertFunction(
985 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
987 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
989 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
990 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
992 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
994 MsanInstrumentAsmStoreFn =
M.getOrInsertFunction(
995 "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
998 createKernelApi(M, TLI);
1000 createUserspaceApi(M, TLI);
1002 CallbacksInitialized =
true;
1008 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
1026void MemorySanitizer::initializeModule(
Module &M) {
1027 auto &
DL =
M.getDataLayout();
1029 TargetTriple =
M.getTargetTriple();
1031 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
1032 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
1034 if (ShadowPassed || OriginPassed) {
1039 MapParams = &CustomMapParams;
1041 switch (TargetTriple.getOS()) {
1043 switch (TargetTriple.getArch()) {
1058 switch (TargetTriple.getArch()) {
1067 switch (TargetTriple.getArch()) {
1101 C = &(
M.getContext());
1103 IntptrTy = IRB.getIntPtrTy(
DL);
1104 OriginTy = IRB.getInt32Ty();
1105 PtrTy = IRB.getPtrTy();
1110 if (!CompileKernel) {
1112 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1113 return new GlobalVariable(
1114 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1115 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1119 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1120 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1121 GlobalValue::WeakODRLinkage,
1122 IRB.getInt32(Recover),
"__msan_keep_going");
1137struct VarArgHelper {
1138 virtual ~VarArgHelper() =
default;
1141 virtual void visitCallBase(CallBase &CB,
IRBuilder<> &IRB) = 0;
1144 virtual void visitVAStartInst(VAStartInst &
I) = 0;
1147 virtual void visitVACopyInst(VACopyInst &
I) = 0;
1153 virtual void finalizeInstrumentation() = 0;
1156struct MemorySanitizerVisitor;
1161 MemorySanitizerVisitor &Visitor);
1168 if (TypeSizeFixed <= 8)
1177class NextNodeIRBuilder :
public IRBuilder<> {
1190struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1192 MemorySanitizer &MS;
1194 ValueMap<Value *, Value *> ShadowMap, OriginMap;
1195 std::unique_ptr<VarArgHelper> VAHelper;
1196 const TargetLibraryInfo *TLI;
1203 bool PropagateShadow;
1206 bool PoisonUndefVectors;
1208 struct ShadowOriginAndInsertPoint {
1213 ShadowOriginAndInsertPoint(
Value *S,
Value *O, Instruction *
I)
1214 : Shadow(S), Origin(
O), OrigIns(
I) {}
1217 DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
1218 SmallSetVector<AllocaInst *, 16> AllocaSet;
1221 int64_t SplittableBlocksCount = 0;
1223 MemorySanitizerVisitor(Function &
F, MemorySanitizer &MS,
1224 const TargetLibraryInfo &TLI)
1226 bool SanitizeFunction =
1228 InsertChecks = SanitizeFunction;
1229 PropagateShadow = SanitizeFunction;
1240 MS.initializeCallbacks(*
F.getParent(), TLI);
1242 IRBuilder<>(&
F.getEntryBlock(),
F.getEntryBlock().getFirstNonPHIIt())
1243 .CreateIntrinsic(Intrinsic::donothing, {});
1245 if (MS.CompileKernel) {
1247 insertKmsanPrologue(IRB);
1251 <<
"MemorySanitizer is not inserting checks into '"
1252 <<
F.getName() <<
"'\n");
1255 bool instrumentWithCalls(
Value *V) {
1259 ++SplittableBlocksCount;
1264 bool isInPrologue(Instruction &
I) {
1265 return I.getParent() == FnPrologueEnd->
getParent() &&
1274 if (MS.TrackOrigins <= 1)
1276 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1280 const DataLayout &
DL =
F.getDataLayout();
1281 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1291 TypeSize TS, Align Alignment) {
1292 const DataLayout &
DL =
F.getDataLayout();
1293 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1294 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1306 auto [InsertPt,
Index] =
1318 Align CurrentAlignment = Alignment;
1319 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1320 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1322 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1327 CurrentAlignment = IntptrAlignment;
1340 Value *OriginPtr, Align Alignment) {
1341 const DataLayout &
DL =
F.getDataLayout();
1343 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
1345 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1354 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1361 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1363 if (instrumentWithCalls(ConvertedShadow) &&
1365 FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1366 Value *ConvertedShadow2 =
1368 CallBase *CB = IRB.
CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
1372 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1376 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1381 void materializeStores() {
1382 for (StoreInst *SI : StoreList) {
1384 Value *Val =
SI->getValueOperand();
1385 Value *Addr =
SI->getPointerOperand();
1386 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1387 Value *ShadowPtr, *OriginPtr;
1389 const Align Alignment =
SI->getAlign();
1391 std::tie(ShadowPtr, OriginPtr) =
1392 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
true);
1394 [[maybe_unused]] StoreInst *NewSI =
1401 if (MS.TrackOrigins && !
SI->isAtomic())
1402 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1409 if (MS.TrackOrigins < 2)
1412 if (LazyWarningDebugLocationCount.
empty())
1413 for (
const auto &
I : InstrumentationList)
1414 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1430 auto NewDebugLoc = OI->getDebugLoc();
1437 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1438 Origin = updateOrigin(Origin, IRBOrigin);
1443 if (MS.CompileKernel || MS.TrackOrigins)
1454 const DataLayout &
DL =
F.getDataLayout();
1455 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1457 if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
1459 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1460 Value *ConvertedShadow2 =
1464 FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1468 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1472 FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
1475 unsigned ShadowSize =
DL.getTypeAllocSize(ConvertedShadow2->
getType());
1478 {ShadowAlloca, ConstantInt::get(IRB.
getInt64Ty(), ShadowSize),
1479 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1484 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1487 !MS.Recover, MS.ColdCallWeights);
1490 insertWarningFn(IRB, Origin);
1495 void materializeInstructionChecks(
1497 const DataLayout &
DL =
F.getDataLayout();
1500 bool Combine = !MS.TrackOrigins;
1502 Value *Shadow =
nullptr;
1503 for (
const auto &ShadowData : InstructionChecks) {
1504 assert(ShadowData.OrigIns == Instruction);
1507 Value *ConvertedShadow = ShadowData.Shadow;
1516 insertWarningFn(IRB, ShadowData.Origin);
1526 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1531 Shadow = ConvertedShadow;
1535 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1536 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1537 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1543 materializeOneCheck(IRB, Shadow,
nullptr);
1547 void materializeChecks() {
1550 SmallPtrSet<Instruction *, 16>
Done;
1553 for (
auto I = InstrumentationList.begin();
1554 I != InstrumentationList.end();) {
1555 auto OrigIns =
I->OrigIns;
1559 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1560 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1561 return OrigIns != R.OrigIns;
1575 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1576 {Zero, IRB.getInt32(0)},
"param_shadow");
1577 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1578 {Zero, IRB.getInt32(1)},
"retval_shadow");
1579 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1580 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1581 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1582 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1583 MS.VAArgOverflowSizeTLS =
1584 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1585 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1586 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1587 {Zero, IRB.getInt32(5)},
"param_origin");
1588 MS.RetvalOriginTLS =
1589 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1590 {Zero, IRB.getInt32(6)},
"retval_origin");
1592 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1605 for (Instruction *
I : Instructions)
1609 for (PHINode *PN : ShadowPHINodes) {
1611 PHINode *PNO = MS.TrackOrigins ?
cast<PHINode>(getOrigin(PN)) : nullptr;
1612 size_t NumValues = PN->getNumIncomingValues();
1613 for (
size_t v = 0;
v < NumValues;
v++) {
1614 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1616 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1620 VAHelper->finalizeInstrumentation();
1625 for (
auto Item : LifetimeStartList) {
1626 instrumentAlloca(*Item.second, Item.first);
1627 AllocaSet.
remove(Item.second);
1632 for (AllocaInst *AI : AllocaSet)
1633 instrumentAlloca(*AI);
1636 materializeChecks();
1640 materializeStores();
1646 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1657 const DataLayout &
DL =
F.getDataLayout();
1659 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1661 VT->getElementCount());
1664 return ArrayType::get(getShadowTy(AT->getElementType()),
1665 AT->getNumElements());
1669 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1670 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1672 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1675 uint32_t TypeSize =
DL.getTypeSizeInBits(OrigTy);
1685 for (
unsigned Idx = 0; Idx <
Struct->getNumElements(); Idx++) {
1688 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1690 if (Aggregator != FalseVal)
1691 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1693 Aggregator = ShadowBool;
1700 Value *collapseArrayShadow(ArrayType *Array,
Value *Shadow,
1702 if (!
Array->getNumElements())
1706 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1708 for (
unsigned Idx = 1; Idx <
Array->getNumElements(); Idx++) {
1710 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1711 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1721 return collapseStructShadow(
Struct, V, IRB);
1723 return collapseArrayShadow(Array, V, IRB);
1728 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1736 Type *VTy =
V->getType();
1738 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1745 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1747 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1748 VectTy->getElementCount());
1754 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1756 return VectorType::get(
1757 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1758 VectTy->getElementCount());
1760 assert(IntPtrTy == MS.IntptrTy);
1767 VectTy->getElementCount(),
1768 constToIntPtr(VectTy->getElementType(),
C));
1770 assert(IntPtrTy == MS.IntptrTy);
1771 return ConstantInt::get(MS.IntptrTy,
C);
1784 Type *IntptrTy = ptrToIntPtrType(Addr->
getType());
1787 if (uint64_t AndMask = MS.MapParams->AndMask)
1788 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1790 if (uint64_t XorMask = MS.MapParams->XorMask)
1791 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1803 std::pair<Value *, Value *>
1805 MaybeAlign Alignment) {
1810 assert(VectTy->getElementType()->isPointerTy());
1812 Type *IntptrTy = ptrToIntPtrType(Addr->
getType());
1813 Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1814 Value *ShadowLong = ShadowOffset;
1815 if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1817 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1820 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1822 Value *OriginPtr =
nullptr;
1823 if (MS.TrackOrigins) {
1824 Value *OriginLong = ShadowOffset;
1825 uint64_t OriginBase = MS.MapParams->OriginBase;
1826 if (OriginBase != 0)
1828 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1831 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1834 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1836 return std::make_pair(ShadowPtr, OriginPtr);
1839 template <
typename... ArgsTy>
1844 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1845 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1848 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1851 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *Addr,
1855 Value *ShadowOriginPtrs;
1856 const DataLayout &
DL =
F.getDataLayout();
1857 TypeSize
Size =
DL.getTypeStoreSize(ShadowTy);
1859 FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(
isStore,
Size);
1862 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1864 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1865 ShadowOriginPtrs = createMetadataCall(
1867 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1874 return std::make_pair(ShadowPtr, OriginPtr);
1880 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *Addr,
1887 return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy,
isStore);
1892 Value *ShadowPtrs = ConstantInt::getNullValue(
1894 Value *OriginPtrs =
nullptr;
1895 if (MS.TrackOrigins)
1896 OriginPtrs = ConstantInt::getNullValue(
1898 for (
unsigned i = 0; i < NumElements; ++i) {
1901 auto [ShadowPtr, OriginPtr] =
1902 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1905 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1906 if (MS.TrackOrigins)
1908 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1910 return {ShadowPtrs, OriginPtrs};
1913 std::pair<Value *, Value *> getShadowOriginPtr(
Value *Addr,
IRBuilder<> &IRB,
1915 MaybeAlign Alignment,
1917 if (MS.CompileKernel)
1918 return getShadowOriginPtrKernel(Addr, IRB, ShadowTy,
isStore);
1919 return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1927 ConstantInt::get(MS.IntptrTy, ArgOffset),
"_msarg");
1932 if (!MS.TrackOrigins)
1935 ConstantInt::get(MS.IntptrTy, ArgOffset),
1945 Value *getOriginPtrForRetval() {
1947 return MS.RetvalOriginTLS;
1952 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1953 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1958 if (!MS.TrackOrigins)
1960 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1961 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1962 OriginMap[
V] = Origin;
1966 Type *ShadowTy = getShadowTy(OrigTy);
1976 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
1985 getPoisonedShadow(AT->getElementType()));
1990 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1991 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1999 Type *ShadowTy = getShadowTy(V);
2002 return getPoisonedShadow(ShadowTy);
2014 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
2015 return getCleanShadow(V);
2017 Value *Shadow = ShadowMap[
V];
2019 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
2020 assert(Shadow &&
"No shadow for a value");
2027 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
2028 : getCleanShadow(V);
2034 Value *&ShadowPtr = ShadowMap[
V];
2039 unsigned ArgOffset = 0;
2040 const DataLayout &
DL =
F->getDataLayout();
2041 for (
auto &FArg :
F->args()) {
2042 if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
2044 ?
"vscale not fully supported\n"
2045 :
"Arg is not sized\n"));
2047 ShadowPtr = getCleanShadow(V);
2048 setOrigin(
A, getCleanOrigin());
2054 unsigned Size = FArg.hasByValAttr()
2055 ?
DL.getTypeAllocSize(FArg.getParamByValType())
2056 :
DL.getTypeAllocSize(FArg.getType());
2060 if (FArg.hasByValAttr()) {
2064 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
2065 FArg.getParamAlign(), FArg.getParamByValType());
2066 Value *CpShadowPtr, *CpOriginPtr;
2067 std::tie(CpShadowPtr, CpOriginPtr) =
2068 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2070 if (!PropagateShadow || Overflow) {
2072 EntryIRB.CreateMemSet(
2076 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2078 [[maybe_unused]]
Value *Cpy = EntryIRB.CreateMemCpy(
2079 CpShadowPtr, CopyAlign,
Base, CopyAlign,
Size);
2082 if (MS.TrackOrigins) {
2083 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2087 EntryIRB.CreateMemCpy(
2096 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2097 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2098 ShadowPtr = getCleanShadow(V);
2099 setOrigin(
A, getCleanOrigin());
2102 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2103 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2105 if (MS.TrackOrigins) {
2106 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2107 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2111 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2117 assert(ShadowPtr &&
"Could not find shadow for an argument");
2124 cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
2125 PoisonUndefVectors) {
2128 for (
unsigned i = 0; i != NumElems; ++i) {
2131 : getCleanShadow(Elem);
2135 LLVM_DEBUG(
dbgs() <<
"Partial undef constant vector: " << *V <<
" ==> "
2136 << *ShadowConstant <<
"\n");
2138 return ShadowConstant;
2144 return getCleanShadow(V);
2148 Value *getShadow(Instruction *
I,
int i) {
2149 return getShadow(
I->getOperand(i));
2154 if (!MS.TrackOrigins)
2157 return getCleanOrigin();
2159 "Unexpected value type in getOrigin()");
2161 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2162 return getCleanOrigin();
2164 Value *Origin = OriginMap[
V];
2165 assert(Origin &&
"Missing origin");
2170 Value *getOrigin(Instruction *
I,
int i) {
2171 return getOrigin(
I->getOperand(i));
2178 void insertCheckShadow(
Value *Shadow,
Value *Origin, Instruction *OrigIns) {
2184 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2185 << *OrigIns <<
"\n");
2192 "Can only insert checks for integer, vector, and aggregate shadow "
2195 InstrumentationList.push_back(
2196 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2204 void insertCheckShadowOf(
Value *Val, Instruction *OrigIns) {
2206 Value *Shadow, *Origin;
2208 Shadow = getShadow(Val);
2211 Origin = getOrigin(Val);
2218 insertCheckShadow(Shadow, Origin, OrigIns);
2223 case AtomicOrdering::NotAtomic:
2224 return AtomicOrdering::NotAtomic;
2225 case AtomicOrdering::Unordered:
2226 case AtomicOrdering::Monotonic:
2227 case AtomicOrdering::Release:
2228 return AtomicOrdering::Release;
2229 case AtomicOrdering::Acquire:
2230 case AtomicOrdering::AcquireRelease:
2231 return AtomicOrdering::AcquireRelease;
2232 case AtomicOrdering::SequentiallyConsistent:
2233 return AtomicOrdering::SequentiallyConsistent;
2239 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2240 uint32_t OrderingTable[NumOrderings] = {};
2242 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2243 OrderingTable[(
int)AtomicOrderingCABI::release] =
2244 (int)AtomicOrderingCABI::release;
2245 OrderingTable[(int)AtomicOrderingCABI::consume] =
2246 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2247 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2248 (
int)AtomicOrderingCABI::acq_rel;
2249 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2250 (
int)AtomicOrderingCABI::seq_cst;
2257 case AtomicOrdering::NotAtomic:
2258 return AtomicOrdering::NotAtomic;
2259 case AtomicOrdering::Unordered:
2260 case AtomicOrdering::Monotonic:
2261 case AtomicOrdering::Acquire:
2262 return AtomicOrdering::Acquire;
2263 case AtomicOrdering::Release:
2264 case AtomicOrdering::AcquireRelease:
2265 return AtomicOrdering::AcquireRelease;
2266 case AtomicOrdering::SequentiallyConsistent:
2267 return AtomicOrdering::SequentiallyConsistent;
2273 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2274 uint32_t OrderingTable[NumOrderings] = {};
2276 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2277 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2278 OrderingTable[(int)AtomicOrderingCABI::consume] =
2279 (
int)AtomicOrderingCABI::acquire;
2280 OrderingTable[(int)AtomicOrderingCABI::release] =
2281 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2282 (int)AtomicOrderingCABI::acq_rel;
2283 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2284 (
int)AtomicOrderingCABI::seq_cst;
2290 using InstVisitor<MemorySanitizerVisitor>
::visit;
2291 void visit(Instruction &
I) {
2292 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2295 if (isInPrologue(
I))
2300 setShadow(&
I, getCleanShadow(&
I));
2301 setOrigin(&
I, getCleanOrigin());
2312 void visitLoadInst(LoadInst &
I) {
2313 assert(
I.getType()->isSized() &&
"Load type must have size");
2314 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2315 NextNodeIRBuilder IRB(&
I);
2316 Type *ShadowTy = getShadowTy(&
I);
2317 Value *Addr =
I.getPointerOperand();
2318 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2319 const Align Alignment =
I.getAlign();
2320 if (PropagateShadow) {
2321 std::tie(ShadowPtr, OriginPtr) =
2322 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
2326 setShadow(&
I, getCleanShadow(&
I));
2330 insertCheckShadowOf(
I.getPointerOperand(), &
I);
2335 if (MS.TrackOrigins) {
2336 if (PropagateShadow) {
2341 setOrigin(&
I, getCleanOrigin());
2350 void visitStoreInst(StoreInst &
I) {
2351 StoreList.push_back(&
I);
2353 insertCheckShadowOf(
I.getPointerOperand(), &
I);
2356 void handleCASOrRMW(Instruction &
I) {
2360 Value *Addr =
I.getOperand(0);
2361 Value *Val =
I.getOperand(1);
2362 Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val),
Align(1),
2367 insertCheckShadowOf(Addr, &
I);
2373 insertCheckShadowOf(Val, &
I);
2377 setShadow(&
I, getCleanShadow(&
I));
2378 setOrigin(&
I, getCleanOrigin());
2381 void visitAtomicRMWInst(AtomicRMWInst &
I) {
2386 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2392 void visitExtractElementInst(ExtractElementInst &
I) {
2393 insertCheckShadowOf(
I.getOperand(1), &
I);
2397 setOrigin(&
I, getOrigin(&
I, 0));
2400 void visitInsertElementInst(InsertElementInst &
I) {
2401 insertCheckShadowOf(
I.getOperand(2), &
I);
2403 auto *Shadow0 = getShadow(&
I, 0);
2404 auto *Shadow1 = getShadow(&
I, 1);
2407 setOriginForNaryOp(
I);
2410 void visitShuffleVectorInst(ShuffleVectorInst &
I) {
2412 auto *Shadow0 = getShadow(&
I, 0);
2413 auto *Shadow1 = getShadow(&
I, 1);
2416 setOriginForNaryOp(
I);
2420 void visitSExtInst(SExtInst &
I) {
2422 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2423 setOrigin(&
I, getOrigin(&
I, 0));
2426 void visitZExtInst(ZExtInst &
I) {
2428 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2429 setOrigin(&
I, getOrigin(&
I, 0));
2432 void visitTruncInst(TruncInst &
I) {
2434 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2435 setOrigin(&
I, getOrigin(&
I, 0));
2438 void visitBitCastInst(BitCastInst &
I) {
2443 if (CI->isMustTailCall())
2447 setOrigin(&
I, getOrigin(&
I, 0));
2450 void visitPtrToIntInst(PtrToIntInst &
I) {
2453 "_msprop_ptrtoint"));
2454 setOrigin(&
I, getOrigin(&
I, 0));
2457 void visitIntToPtrInst(IntToPtrInst &
I) {
2460 "_msprop_inttoptr"));
2461 setOrigin(&
I, getOrigin(&
I, 0));
2464 void visitFPToSIInst(CastInst &
I) { handleShadowOr(
I); }
2465 void visitFPToUIInst(CastInst &
I) { handleShadowOr(
I); }
2466 void visitSIToFPInst(CastInst &
I) { handleShadowOr(
I); }
2467 void visitUIToFPInst(CastInst &
I) { handleShadowOr(
I); }
2468 void visitFPExtInst(CastInst &
I) { handleShadowOr(
I); }
2469 void visitFPTruncInst(CastInst &
I) { handleShadowOr(
I); }
2476 void visitAnd(BinaryOperator &
I) {
2484 Value *S2 = getShadow(&
I, 1);
2485 Value *V1 =
I.getOperand(0);
2486 Value *V2 =
I.getOperand(1);
2494 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2495 setOriginForNaryOp(
I);
2498 void visitOr(BinaryOperator &
I) {
2511 Value *S2 = getShadow(&
I, 1);
2512 Value *V1 =
I.getOperand(0);
2513 Value *V2 =
I.getOperand(1);
2532 S = IRB.
CreateOr(S, DisjointOrShadow,
"_ms_disjoint");
2536 setOriginForNaryOp(
I);
2554 template <
bool CombineShadow>
class Combiner {
2555 Value *Shadow =
nullptr;
2556 Value *Origin =
nullptr;
2558 MemorySanitizerVisitor *MSV;
2561 Combiner(MemorySanitizerVisitor *MSV,
IRBuilder<> &IRB)
2562 : IRB(IRB), MSV(MSV) {}
2566 if (CombineShadow) {
2571 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2572 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2576 if (MSV->MS.TrackOrigins) {
2583 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2584 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2594 Value *OpShadow = MSV->getShadow(V);
2595 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2596 return Add(OpShadow, OpOrigin);
2601 void Done(Instruction *
I) {
2602 if (CombineShadow) {
2604 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2605 MSV->setShadow(
I, Shadow);
2607 if (MSV->MS.TrackOrigins) {
2609 MSV->setOrigin(
I, Origin);
2615 void DoneAndStoreOrigin(TypeSize TS,
Value *OriginPtr) {
2616 if (MSV->MS.TrackOrigins) {
2623 using ShadowAndOriginCombiner = Combiner<true>;
2624 using OriginCombiner = Combiner<false>;
2627 void setOriginForNaryOp(Instruction &
I) {
2628 if (!MS.TrackOrigins)
2631 OriginCombiner
OC(
this, IRB);
2632 for (Use &
Op :
I.operands())
2637 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2639 "Vector of pointers is not a valid shadow type");
2649 Type *srcTy =
V->getType();
2652 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2653 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2654 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2672 Type *ShadowTy = getShadowTy(V);
2673 if (
V->getType() == ShadowTy)
2675 if (
V->getType()->isPtrOrPtrVectorTy())
2682 void handleShadowOr(Instruction &
I) {
2684 ShadowAndOriginCombiner SC(
this, IRB);
2685 for (Use &
Op :
I.operands())
2702 Value *horizontalReduce(IntrinsicInst &
I,
unsigned ReductionFactor,
2705 unsigned TotalNumElems =
2710 TotalNumElems = TotalNumElems * 2;
2713 assert(TotalNumElems % ReductionFactor == 0);
2718 for (
unsigned i = 0; i < ReductionFactor; i++) {
2719 SmallVector<int, 16>
Mask;
2720 for (
unsigned X = 0;
X < TotalNumElems;
X += ReductionFactor)
2721 Mask.push_back(
X + i);
2743 void handlePairwiseShadowOrIntrinsic(IntrinsicInst &
I) {
2744 assert(
I.arg_size() == 1 ||
I.arg_size() == 2);
2746 assert(
I.getType()->isVectorTy());
2747 assert(
I.getArgOperand(0)->getType()->isVectorTy());
2749 [[maybe_unused]] FixedVectorType *ParamType =
2753 [[maybe_unused]] FixedVectorType *
ReturnType =
2761 Value *FirstArgShadow = getShadow(&
I, 0);
2762 Value *SecondArgShadow =
nullptr;
2763 if (
I.arg_size() == 2)
2764 SecondArgShadow = getShadow(&
I, 1);
2766 Value *OrShadow = horizontalReduce(
I, 2, FirstArgShadow,
2769 OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&
I));
2771 setShadow(&
I, OrShadow);
2772 setOriginForNaryOp(
I);
2782 void handlePairwiseShadowOrIntrinsic(IntrinsicInst &
I,
2783 int ReinterpretElemWidth) {
2784 assert(
I.arg_size() == 1 ||
I.arg_size() == 2);
2786 assert(
I.getType()->isVectorTy());
2787 assert(
I.getArgOperand(0)->getType()->isVectorTy());
2789 FixedVectorType *ParamType =
2794 [[maybe_unused]] FixedVectorType *
ReturnType =
2801 FixedVectorType *ReinterpretShadowTy =
nullptr;
2809 Value *FirstArgShadow = getShadow(&
I, 0);
2810 FirstArgShadow = IRB.
CreateBitCast(FirstArgShadow, ReinterpretShadowTy);
2820 Value *SecondArgShadow =
nullptr;
2821 if (
I.arg_size() == 2) {
2822 SecondArgShadow = getShadow(&
I, 1);
2823 SecondArgShadow = IRB.
CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
2826 Value *OrShadow = horizontalReduce(
I, 2, FirstArgShadow,
2829 OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&
I));
2831 setShadow(&
I, OrShadow);
2832 setOriginForNaryOp(
I);
2835 void visitFNeg(UnaryOperator &
I) { handleShadowOr(
I); }
2846 void handleMulByConstant(BinaryOperator &
I, Constant *ConstArg,
2852 Type *EltTy = VTy->getElementType();
2854 for (
unsigned Idx = 0; Idx < NumElements; ++Idx) {
2855 if (ConstantInt *Elt =
2857 const APInt &
V = Elt->getValue();
2858 APInt V2 = APInt(
V.getBitWidth(), 1) <<
V.countr_zero();
2859 Elements.push_back(ConstantInt::get(EltTy, V2));
2861 Elements.push_back(ConstantInt::get(EltTy, 1));
2867 const APInt &
V = Elt->getValue();
2868 APInt V2 = APInt(
V.getBitWidth(), 1) <<
V.countr_zero();
2869 ShadowMul = ConstantInt::get(Ty, V2);
2871 ShadowMul = ConstantInt::get(Ty, 1);
2877 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2878 setOrigin(&
I, getOrigin(OtherArg));
2881 void visitMul(BinaryOperator &
I) {
2884 if (constOp0 && !constOp1)
2885 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2886 else if (constOp1 && !constOp0)
2887 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2892 void visitFAdd(BinaryOperator &
I) { handleShadowOr(
I); }
2893 void visitFSub(BinaryOperator &
I) { handleShadowOr(
I); }
2894 void visitFMul(BinaryOperator &
I) { handleShadowOr(
I); }
2895 void visitAdd(BinaryOperator &
I) { handleShadowOr(
I); }
2896 void visitSub(BinaryOperator &
I) { handleShadowOr(
I); }
2897 void visitXor(BinaryOperator &
I) { handleShadowOr(
I); }
2899 void handleIntegerDiv(Instruction &
I) {
2902 insertCheckShadowOf(
I.getOperand(1), &
I);
2903 setShadow(&
I, getShadow(&
I, 0));
2904 setOrigin(&
I, getOrigin(&
I, 0));
2907 void visitUDiv(BinaryOperator &
I) { handleIntegerDiv(
I); }
2908 void visitSDiv(BinaryOperator &
I) { handleIntegerDiv(
I); }
2909 void visitURem(BinaryOperator &
I) { handleIntegerDiv(
I); }
2910 void visitSRem(BinaryOperator &
I) { handleIntegerDiv(
I); }
2914 void visitFDiv(BinaryOperator &
I) { handleShadowOr(
I); }
2915 void visitFRem(BinaryOperator &
I) { handleShadowOr(
I); }
2921 void handleEqualityComparison(ICmpInst &
I) {
2925 Value *Sa = getShadow(
A);
2926 Value *Sb = getShadow(
B);
2952 setOriginForNaryOp(
I);
2960 void handleRelationalComparisonExact(ICmpInst &
I) {
2964 Value *Sa = getShadow(
A);
2965 Value *Sb = getShadow(
B);
2976 bool IsSigned =
I.isSigned();
2978 auto GetMinMaxUnsigned = [&](
Value *
V,
Value *S) {
2988 V = IRB.
CreateXor(V, ConstantInt::get(
V->getType(), MinVal));
2993 return std::make_pair(Min, Max);
2996 auto [Amin, Amax] = GetMinMaxUnsigned(
A, Sa);
2997 auto [Bmin, Bmax] = GetMinMaxUnsigned(
B, Sb);
3003 setOriginForNaryOp(
I);
3010 void handleSignedRelationalComparison(ICmpInst &
I) {
3015 op =
I.getOperand(0);
3016 pre =
I.getPredicate();
3018 op =
I.getOperand(1);
3019 pre =
I.getSwappedPredicate();
3032 setShadow(&
I, Shadow);
3033 setOrigin(&
I, getOrigin(
op));
3039 void visitICmpInst(ICmpInst &
I) {
3044 if (
I.isEquality()) {
3045 handleEqualityComparison(
I);
3051 handleRelationalComparisonExact(
I);
3055 handleSignedRelationalComparison(
I);
3061 handleRelationalComparisonExact(
I);
3068 void visitFCmpInst(FCmpInst &
I) { handleShadowOr(
I); }
3070 void handleShift(BinaryOperator &
I) {
3075 Value *S2 = getShadow(&
I, 1);
3078 Value *V2 =
I.getOperand(1);
3080 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3081 setOriginForNaryOp(
I);
3084 void visitShl(BinaryOperator &
I) { handleShift(
I); }
3085 void visitAShr(BinaryOperator &
I) { handleShift(
I); }
3086 void visitLShr(BinaryOperator &
I) { handleShift(
I); }
3088 void handleFunnelShift(IntrinsicInst &
I) {
3092 Value *S0 = getShadow(&
I, 0);
3094 Value *S2 = getShadow(&
I, 2);
3097 Value *V2 =
I.getOperand(2);
3100 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3101 setOriginForNaryOp(
I);
3114 void visitMemMoveInst(MemMoveInst &
I) {
3115 getShadow(
I.getArgOperand(1));
3118 {I.getArgOperand(0), I.getArgOperand(1),
3119 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3137 void visitMemCpyInst(MemCpyInst &
I) {
3138 getShadow(
I.getArgOperand(1));
3141 {I.getArgOperand(0), I.getArgOperand(1),
3142 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3147 void visitMemSetInst(MemSetInst &
I) {
3151 {I.getArgOperand(0),
3152 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
3153 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3157 void visitVAStartInst(VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
3159 void visitVACopyInst(VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
3165 bool handleVectorStoreIntrinsic(IntrinsicInst &
I) {
3169 Value *Addr =
I.getArgOperand(0);
3170 Value *Shadow = getShadow(&
I, 1);
3171 Value *ShadowPtr, *OriginPtr;
3175 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3180 insertCheckShadowOf(Addr, &
I);
3183 if (MS.TrackOrigins)
3192 bool handleVectorLoadIntrinsic(IntrinsicInst &
I) {
3196 Value *Addr =
I.getArgOperand(0);
3198 Type *ShadowTy = getShadowTy(&
I);
3199 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
3200 if (PropagateShadow) {
3204 std::tie(ShadowPtr, OriginPtr) =
3205 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
3209 setShadow(&
I, getCleanShadow(&
I));
3213 insertCheckShadowOf(Addr, &
I);
3215 if (MS.TrackOrigins) {
3216 if (PropagateShadow)
3217 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
3219 setOrigin(&
I, getCleanOrigin());
3239 [[maybe_unused]]
bool
3240 maybeHandleSimpleNomemIntrinsic(IntrinsicInst &
I,
3241 unsigned int trailingFlags) {
3242 Type *RetTy =
I.getType();
3246 unsigned NumArgOperands =
I.arg_size();
3247 assert(NumArgOperands >= trailingFlags);
3248 for (
unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
3249 Type *Ty =
I.getArgOperand(i)->getType();
3255 ShadowAndOriginCombiner SC(
this, IRB);
3256 for (
unsigned i = 0; i < NumArgOperands; ++i)
3257 SC.Add(
I.getArgOperand(i));
3274 bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &
I) {
3275 unsigned NumArgOperands =
I.arg_size();
3276 if (NumArgOperands == 0)
3279 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3280 I.getArgOperand(1)->getType()->isVectorTy() &&
3281 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
3283 return handleVectorStoreIntrinsic(
I);
3286 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3287 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3289 return handleVectorLoadIntrinsic(
I);
3292 if (
I.doesNotAccessMemory())
3293 if (maybeHandleSimpleNomemIntrinsic(
I, 0))
3301 bool maybeHandleUnknownIntrinsic(IntrinsicInst &
I) {
3302 if (maybeHandleUnknownIntrinsicUnlogged(
I)) {
3306 LLVM_DEBUG(
dbgs() <<
"UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " <<
I
3313 void handleInvariantGroup(IntrinsicInst &
I) {
3314 setShadow(&
I, getShadow(&
I, 0));
3315 setOrigin(&
I, getOrigin(&
I, 0));
3318 void handleLifetimeStart(IntrinsicInst &
I) {
3323 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3326 void handleBswap(IntrinsicInst &
I) {
3329 Type *OpType =
Op->getType();
3332 setOrigin(&
I, getOrigin(
Op));
3353 void handleCountLeadingTrailingZeros(IntrinsicInst &
I) {
3355 Value *Src =
I.getArgOperand(0);
3356 Value *SrcShadow = getShadow(Src);
3360 I.getType(),
I.getIntrinsicID(), {Src, False});
3362 I.getType(),
I.getIntrinsicID(), {SrcShadow, False});
3365 ConcreteZerosCount, ShadowZerosCount,
"_mscz_cmp_zeros");
3367 Value *NotAllZeroShadow =
3369 Value *OutputShadow =
3370 IRB.
CreateAnd(CompareConcreteZeros, NotAllZeroShadow,
"_mscz_main");
3376 OutputShadow = IRB.
CreateOr(OutputShadow, BoolZeroPoison,
"_mscz_bs");
3379 OutputShadow = IRB.
CreateSExt(OutputShadow, getShadowTy(Src),
"_mscz_os");
3381 setShadow(&
I, OutputShadow);
3382 setOriginForNaryOp(
I);
3392 void handleNEONVectorConvertIntrinsic(IntrinsicInst &
I) {
3396 Value *S0 = getShadow(&
I, 0);
3405 setShadow(&
I, OutShadow);
3406 setOriginForNaryOp(
I);
3415 FixedVectorType *maybeShrinkVectorShadowType(
Value *Src, IntrinsicInst &
I) {
3435 Value *maybeExtendVectorShadowWithZeros(
Value *Shadow, IntrinsicInst &
I) {
3440 Value *FullShadow = getCleanShadow(&
I);
3441 unsigned ShadowNumElems =
3443 unsigned FullShadowNumElems =
3446 assert((ShadowNumElems == FullShadowNumElems) ||
3447 (ShadowNumElems * 2 == FullShadowNumElems));
3449 if (ShadowNumElems == FullShadowNumElems) {
3450 FullShadow = Shadow;
3454 std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
3479 void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &
I,
3480 bool HasRoundingMode) {
3481 if (HasRoundingMode) {
3489 Value *Src =
I.getArgOperand(0);
3490 assert(Src->getType()->isVectorTy());
3494 VectorType *ShadowType = maybeShrinkVectorShadowType(Src,
I);
3497 Value *S0 = getShadow(&
I, 0);
3509 Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow,
I);
3511 setShadow(&
I, FullShadow);
3512 setOriginForNaryOp(
I);
3533 void handleSSEVectorConvertIntrinsic(IntrinsicInst &
I,
int NumUsedElements,
3534 bool HasRoundingMode =
false) {
3536 Value *CopyOp, *ConvertOp;
3538 assert((!HasRoundingMode ||
3540 "Invalid rounding mode");
3542 switch (
I.arg_size() - HasRoundingMode) {
3544 CopyOp =
I.getArgOperand(0);
3545 ConvertOp =
I.getArgOperand(1);
3548 ConvertOp =
I.getArgOperand(0);
3562 Value *ConvertShadow = getShadow(ConvertOp);
3563 Value *AggShadow =
nullptr;
3566 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3567 for (
int i = 1; i < NumUsedElements; ++i) {
3569 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3570 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3573 AggShadow = ConvertShadow;
3576 insertCheckShadow(AggShadow, getOrigin(ConvertOp), &
I);
3583 Value *ResultShadow = getShadow(CopyOp);
3585 for (
int i = 0; i < NumUsedElements; ++i) {
3587 ResultShadow, ConstantInt::getNullValue(EltTy),
3590 setShadow(&
I, ResultShadow);
3591 setOrigin(&
I, getOrigin(CopyOp));
3593 setShadow(&
I, getCleanShadow(&
I));
3594 setOrigin(&
I, getCleanOrigin());
3602 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3605 return CreateShadowCast(IRB, S2,
T,
true);
3613 return CreateShadowCast(IRB, S2,
T,
true);
3630 void handleVectorShiftIntrinsic(IntrinsicInst &
I,
bool Variable) {
3636 Value *S2 = getShadow(&
I, 1);
3638 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3639 Value *V1 =
I.getOperand(0);
3640 Value *V2 =
I.getOperand(1);
3642 {IRB.CreateBitCast(S1, V1->getType()), V2});
3644 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3645 setOriginForNaryOp(
I);
3650 Type *getMMXVectorTy(
unsigned EltSizeInBits,
3651 unsigned X86_MMXSizeInBits = 64) {
3652 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3653 "Illegal MMX vector element size");
3655 X86_MMXSizeInBits / EltSizeInBits);
3662 case Intrinsic::x86_sse2_packsswb_128:
3663 case Intrinsic::x86_sse2_packuswb_128:
3664 return Intrinsic::x86_sse2_packsswb_128;
3666 case Intrinsic::x86_sse2_packssdw_128:
3667 case Intrinsic::x86_sse41_packusdw:
3668 return Intrinsic::x86_sse2_packssdw_128;
3670 case Intrinsic::x86_avx2_packsswb:
3671 case Intrinsic::x86_avx2_packuswb:
3672 return Intrinsic::x86_avx2_packsswb;
3674 case Intrinsic::x86_avx2_packssdw:
3675 case Intrinsic::x86_avx2_packusdw:
3676 return Intrinsic::x86_avx2_packssdw;
3678 case Intrinsic::x86_mmx_packsswb:
3679 case Intrinsic::x86_mmx_packuswb:
3680 return Intrinsic::x86_mmx_packsswb;
3682 case Intrinsic::x86_mmx_packssdw:
3683 return Intrinsic::x86_mmx_packssdw;
3685 case Intrinsic::x86_avx512_packssdw_512:
3686 case Intrinsic::x86_avx512_packusdw_512:
3687 return Intrinsic::x86_avx512_packssdw_512;
3689 case Intrinsic::x86_avx512_packsswb_512:
3690 case Intrinsic::x86_avx512_packuswb_512:
3691 return Intrinsic::x86_avx512_packsswb_512;
3707 void handleVectorPackIntrinsic(IntrinsicInst &
I,
3708 unsigned MMXEltSizeInBits = 0) {
3712 Value *S2 = getShadow(&
I, 1);
3713 assert(
S1->getType()->isVectorTy());
3719 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) :
S1->
getType();
3720 if (MMXEltSizeInBits) {
3728 if (MMXEltSizeInBits) {
3734 {S1_ext, S2_ext},
nullptr,
3735 "_msprop_vector_pack");
3736 if (MMXEltSizeInBits)
3739 setOriginForNaryOp(
I);
3743 Constant *createDppMask(
unsigned Width,
unsigned Mask) {
3756 const unsigned Width =
3763 Value *DstMaskV = createDppMask(Width, DstMask);
3780 void handleDppIntrinsic(IntrinsicInst &
I) {
3783 Value *S0 = getShadow(&
I, 0);
3787 const unsigned Width =
3789 assert(Width == 2 || Width == 4 || Width == 8);
3792 const unsigned SrcMask =
Mask >> 4;
3793 const unsigned DstMask =
Mask & 0xf;
3796 Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3801 SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3808 setOriginForNaryOp(
I);
3812 C = CreateAppToShadowCast(IRB,
C);
3821 void handleBlendvIntrinsic(IntrinsicInst &
I) {
3826 Value *Sc = getShadow(&
I, 2);
3827 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
3832 C = convertBlendvToSelectMask(IRB,
C);
3833 Sc = convertBlendvToSelectMask(IRB, Sc);
3839 handleSelectLikeInst(
I,
C,
T,
F);
3843 void handleVectorSadIntrinsic(IntrinsicInst &
I,
bool IsMMX =
false) {
3844 const unsigned SignificantBitsPerResultElement = 16;
3846 unsigned ZeroBitsPerResultElement =
3850 auto *Shadow0 = getShadow(&
I, 0);
3851 auto *Shadow1 = getShadow(&
I, 1);
3856 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3859 setOriginForNaryOp(
I);
3877 void handleVectorPmaddIntrinsic(IntrinsicInst &
I,
unsigned ReductionFactor,
3878 unsigned EltSizeInBits = 0) {
3881 [[maybe_unused]] FixedVectorType *
ReturnType =
3886 Value *Va =
nullptr;
3887 Value *Vb =
nullptr;
3888 Value *Sa =
nullptr;
3889 Value *Sb =
nullptr;
3891 assert(
I.arg_size() == 2 ||
I.arg_size() == 3);
3892 if (
I.arg_size() == 2) {
3893 Va =
I.getOperand(0);
3894 Vb =
I.getOperand(1);
3896 Sa = getShadow(&
I, 0);
3897 Sb = getShadow(&
I, 1);
3898 }
else if (
I.arg_size() == 3) {
3900 Va =
I.getOperand(1);
3901 Vb =
I.getOperand(2);
3903 Sa = getShadow(&
I, 1);
3904 Sb = getShadow(&
I, 2);
3913 if (
I.arg_size() == 3) {
3914 [[maybe_unused]]
auto *AccumulatorType =
3916 assert(AccumulatorType == ReturnType);
3919 FixedVectorType *ImplicitReturnType =
ReturnType;
3921 if (EltSizeInBits) {
3923 getMMXVectorTy(EltSizeInBits * ReductionFactor,
3935 ReturnType->getNumElements() * ReductionFactor);
3961 Value *
And = IRB.
CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});
3980 ImplicitReturnType);
3985 OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&
I));
3988 if (
I.arg_size() == 3)
3989 OutShadow = IRB.
CreateOr(OutShadow, getShadow(&
I, 0));
3991 setShadow(&
I, OutShadow);
3992 setOriginForNaryOp(
I);
3998 void handleVectorComparePackedIntrinsic(IntrinsicInst &
I) {
4000 Type *ResTy = getShadowTy(&
I);
4001 auto *Shadow0 = getShadow(&
I, 0);
4002 auto *Shadow1 = getShadow(&
I, 1);
4007 setOriginForNaryOp(
I);
4013 void handleVectorCompareScalarIntrinsic(IntrinsicInst &
I) {
4015 auto *Shadow0 = getShadow(&
I, 0);
4016 auto *Shadow1 = getShadow(&
I, 1);
4018 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
4020 setOriginForNaryOp(
I);
4029 void handleVectorReduceIntrinsic(IntrinsicInst &
I,
bool AllowShadowCast) {
4034 if (AllowShadowCast)
4035 S = CreateShadowCast(IRB, S, getShadowTy(&
I));
4039 setOriginForNaryOp(
I);
4049 void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &
I) {
4053 Value *Shadow0 = getShadow(&
I, 0);
4059 setOriginForNaryOp(
I);
4065 void handleVectorReduceOrIntrinsic(IntrinsicInst &
I) {
4069 Value *OperandShadow = getShadow(&
I, 0);
4071 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
4079 setOrigin(&
I, getOrigin(&
I, 0));
4085 void handleVectorReduceAndIntrinsic(IntrinsicInst &
I) {
4089 Value *OperandShadow = getShadow(&
I, 0);
4090 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
4098 setOrigin(&
I, getOrigin(&
I, 0));
4101 void handleStmxcsr(IntrinsicInst &
I) {
4103 Value *Addr =
I.getArgOperand(0);
4106 getShadowOriginPtr(Addr, IRB, Ty,
Align(1),
true).first;
4111 insertCheckShadowOf(Addr, &
I);
4114 void handleLdmxcsr(IntrinsicInst &
I) {
4119 Value *Addr =
I.getArgOperand(0);
4122 Value *ShadowPtr, *OriginPtr;
4123 std::tie(ShadowPtr, OriginPtr) =
4124 getShadowOriginPtr(Addr, IRB, Ty, Alignment,
false);
4127 insertCheckShadowOf(Addr, &
I);
4130 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
4132 insertCheckShadow(Shadow, Origin, &
I);
4135 void handleMaskedExpandLoad(IntrinsicInst &
I) {
4138 MaybeAlign
Align =
I.getParamAlign(0);
4140 Value *PassThru =
I.getArgOperand(2);
4143 insertCheckShadowOf(
Ptr, &
I);
4144 insertCheckShadowOf(Mask, &
I);
4147 if (!PropagateShadow) {
4148 setShadow(&
I, getCleanShadow(&
I));
4149 setOrigin(&
I, getCleanOrigin());
4153 Type *ShadowTy = getShadowTy(&
I);
4155 auto [ShadowPtr, OriginPtr] =
4156 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy, Align,
false);
4160 getShadow(PassThru),
"_msmaskedexpload");
4162 setShadow(&
I, Shadow);
4165 setOrigin(&
I, getCleanOrigin());
4168 void handleMaskedCompressStore(IntrinsicInst &
I) {
4170 Value *Values =
I.getArgOperand(0);
4172 MaybeAlign
Align =
I.getParamAlign(1);
4176 insertCheckShadowOf(
Ptr, &
I);
4177 insertCheckShadowOf(Mask, &
I);
4180 Value *Shadow = getShadow(Values);
4181 Type *ElementShadowTy =
4183 auto [ShadowPtr, OriginPtrs] =
4184 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy, Align,
true);
4191 void handleMaskedGather(IntrinsicInst &
I) {
4193 Value *Ptrs =
I.getArgOperand(0);
4194 const Align Alignment(
4197 Value *PassThru =
I.getArgOperand(3);
4199 Type *PtrsShadowTy = getShadowTy(Ptrs);
4201 insertCheckShadowOf(Mask, &
I);
4205 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4208 if (!PropagateShadow) {
4209 setShadow(&
I, getCleanShadow(&
I));
4210 setOrigin(&
I, getCleanOrigin());
4214 Type *ShadowTy = getShadowTy(&
I);
4216 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4217 Ptrs, IRB, ElementShadowTy, Alignment,
false);
4221 getShadow(PassThru),
"_msmaskedgather");
4223 setShadow(&
I, Shadow);
4226 setOrigin(&
I, getCleanOrigin());
4229 void handleMaskedScatter(IntrinsicInst &
I) {
4231 Value *Values =
I.getArgOperand(0);
4232 Value *Ptrs =
I.getArgOperand(1);
4233 const Align Alignment(
4237 Type *PtrsShadowTy = getShadowTy(Ptrs);
4239 insertCheckShadowOf(Mask, &
I);
4243 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4246 Value *Shadow = getShadow(Values);
4247 Type *ElementShadowTy =
4249 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4250 Ptrs, IRB, ElementShadowTy, Alignment,
true);
4261 void handleMaskedStore(IntrinsicInst &
I) {
4263 Value *
V =
I.getArgOperand(0);
4265 const Align Alignment(
4268 Value *Shadow = getShadow(V);
4271 insertCheckShadowOf(
Ptr, &
I);
4272 insertCheckShadowOf(Mask, &
I);
4277 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
4278 Ptr, IRB, Shadow->
getType(), Alignment,
true);
4282 if (!MS.TrackOrigins)
4285 auto &
DL =
F.getDataLayout();
4286 paintOrigin(IRB, getOrigin(V), OriginPtr,
4295 void handleMaskedLoad(IntrinsicInst &
I) {
4298 const Align Alignment(
4301 Value *PassThru =
I.getArgOperand(3);
4304 insertCheckShadowOf(
Ptr, &
I);
4305 insertCheckShadowOf(Mask, &
I);
4308 if (!PropagateShadow) {
4309 setShadow(&
I, getCleanShadow(&
I));
4310 setOrigin(&
I, getCleanOrigin());
4314 Type *ShadowTy = getShadowTy(&
I);
4315 Value *ShadowPtr, *OriginPtr;
4316 std::tie(ShadowPtr, OriginPtr) =
4317 getShadowOriginPtr(
Ptr, IRB, ShadowTy, Alignment,
false);
4319 getShadow(PassThru),
"_msmaskedld"));
4321 if (!MS.TrackOrigins)
4328 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
4333 setOrigin(&
I, Origin);
4349 void handleAVXMaskedStore(IntrinsicInst &
I) {
4354 Value *Dst =
I.getArgOperand(0);
4355 assert(Dst->getType()->isPointerTy() &&
"Destination is not a pointer!");
4360 Value *Src =
I.getArgOperand(2);
4365 Value *SrcShadow = getShadow(Src);
4368 insertCheckShadowOf(Dst, &
I);
4369 insertCheckShadowOf(Mask, &
I);
4372 Value *DstShadowPtr;
4373 Value *DstOriginPtr;
4374 std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
4375 Dst, IRB, SrcShadow->
getType(), Alignment,
true);
4377 SmallVector<Value *, 2> ShadowArgs;
4378 ShadowArgs.
append(1, DstShadowPtr);
4379 ShadowArgs.
append(1, Mask);
4390 if (!MS.TrackOrigins)
4394 auto &
DL =
F.getDataLayout();
4395 paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
4396 DL.getTypeStoreSize(SrcShadow->
getType()),
4415 void handleAVXMaskedLoad(IntrinsicInst &
I) {
4420 Value *Src =
I.getArgOperand(0);
4421 assert(Src->getType()->isPointerTy() &&
"Source is not a pointer!");
4429 insertCheckShadowOf(Mask, &
I);
4432 Type *SrcShadowTy = getShadowTy(Src);
4433 Value *SrcShadowPtr, *SrcOriginPtr;
4434 std::tie(SrcShadowPtr, SrcOriginPtr) =
4435 getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment,
false);
4437 SmallVector<Value *, 2> ShadowArgs;
4438 ShadowArgs.
append(1, SrcShadowPtr);
4439 ShadowArgs.
append(1, Mask);
4448 if (!MS.TrackOrigins)
4455 setOrigin(&
I, PtrSrcOrigin);
4464 assert(isFixedIntVector(Idx));
4465 auto IdxVectorSize =
4473 auto *IdxShadow = getShadow(Idx);
4478 insertCheckShadow(Truncated, getOrigin(Idx),
I);
4483 void handleAVXVpermilvar(IntrinsicInst &
I) {
4485 Value *Shadow = getShadow(&
I, 0);
4486 maskedCheckAVXIndexShadow(IRB,
I.getArgOperand(1), &
I);
4490 Shadow = IRB.
CreateBitCast(Shadow,
I.getArgOperand(0)->getType());
4492 {Shadow, I.getArgOperand(1)});
4495 setOriginForNaryOp(
I);
4500 void handleAVXVpermi2var(IntrinsicInst &
I) {
4505 [[maybe_unused]]
auto ArgVectorSize =
4508 ->getNumElements() == ArgVectorSize);
4510 ->getNumElements() == ArgVectorSize);
4511 assert(
I.getArgOperand(0)->getType() ==
I.getArgOperand(2)->getType());
4512 assert(
I.getType() ==
I.getArgOperand(0)->getType());
4513 assert(
I.getArgOperand(1)->getType()->isIntOrIntVectorTy());
4515 Value *AShadow = getShadow(&
I, 0);
4516 Value *Idx =
I.getArgOperand(1);
4517 Value *BShadow = getShadow(&
I, 2);
4519 maskedCheckAVXIndexShadow(IRB, Idx, &
I);
4523 AShadow = IRB.
CreateBitCast(AShadow,
I.getArgOperand(0)->getType());
4524 BShadow = IRB.
CreateBitCast(BShadow,
I.getArgOperand(2)->getType());
4526 {AShadow, Idx, BShadow});
4528 setOriginForNaryOp(
I);
4531 [[maybe_unused]]
static bool isFixedIntVectorTy(
const Type *
T) {
4535 [[maybe_unused]]
static bool isFixedFPVectorTy(
const Type *
T) {
4539 [[maybe_unused]]
static bool isFixedIntVector(
const Value *V) {
4540 return isFixedIntVectorTy(
V->getType());
4543 [[maybe_unused]]
static bool isFixedFPVector(
const Value *V) {
4544 return isFixedFPVectorTy(
V->getType());
4566 void handleAVX512VectorConvertFPToInt(IntrinsicInst &
I,
bool LastMask) {
4571 Value *WriteThrough;
4575 WriteThrough =
I.getOperand(2);
4576 Mask =
I.getOperand(3);
4579 WriteThrough =
I.getOperand(1);
4580 Mask =
I.getOperand(2);
4585 assert(isFixedIntVector(WriteThrough));
4587 unsigned ANumElements =
4589 [[maybe_unused]]
unsigned WriteThruNumElements =
4591 assert(ANumElements == WriteThruNumElements ||
4592 ANumElements * 2 == WriteThruNumElements);
4595 unsigned MaskNumElements =
Mask->getType()->getScalarSizeInBits();
4596 assert(ANumElements == MaskNumElements ||
4597 ANumElements * 2 == MaskNumElements);
4599 assert(WriteThruNumElements == MaskNumElements);
4603 insertCheckShadowOf(Mask, &
I);
4613 Value *AShadow = getShadow(
A);
4614 AShadow = maybeExtendVectorShadowWithZeros(AShadow,
I);
4616 if (ANumElements * 2 == MaskNumElements) {
4628 "_ms_mask_bitcast");
4638 getShadowTy(&
I),
"_ms_a_shadow");
4640 Value *WriteThroughShadow = getShadow(WriteThrough);
4642 "_ms_writethru_select");
4644 setShadow(&
I, Shadow);
4645 setOriginForNaryOp(
I);
4653 void handleBmiIntrinsic(IntrinsicInst &
I) {
4655 Type *ShadowTy = getShadowTy(&
I);
4658 Value *SMask = getShadow(&
I, 1);
4663 {getShadow(&I, 0), I.getOperand(1)});
4666 setOriginForNaryOp(
I);
4669 static SmallVector<int, 8> getPclmulMask(
unsigned Width,
bool OddElements) {
4670 SmallVector<int, 8>
Mask;
4671 for (
unsigned X = OddElements ? 1 : 0;
X < Width;
X += 2) {
4685 void handlePclmulIntrinsic(IntrinsicInst &
I) {
4690 "pclmul 3rd operand must be a constant");
4693 getPclmulMask(Width, Imm & 0x01));
4695 getPclmulMask(Width, Imm & 0x10));
4696 ShadowAndOriginCombiner SOC(
this, IRB);
4697 SOC.Add(Shuf0, getOrigin(&
I, 0));
4698 SOC.Add(Shuf1, getOrigin(&
I, 1));
4703 void handleUnarySdSsIntrinsic(IntrinsicInst &
I) {
4708 Value *Second = getShadow(&
I, 1);
4710 SmallVector<int, 16>
Mask;
4711 Mask.push_back(Width);
4712 for (
unsigned i = 1; i < Width; i++)
4716 setShadow(&
I, Shadow);
4717 setOriginForNaryOp(
I);
4720 void handleVtestIntrinsic(IntrinsicInst &
I) {
4722 Value *Shadow0 = getShadow(&
I, 0);
4723 Value *Shadow1 = getShadow(&
I, 1);
4729 setShadow(&
I, Shadow);
4730 setOriginForNaryOp(
I);
4733 void handleBinarySdSsIntrinsic(IntrinsicInst &
I) {
4738 Value *Second = getShadow(&
I, 1);
4741 SmallVector<int, 16>
Mask;
4742 Mask.push_back(Width);
4743 for (
unsigned i = 1; i < Width; i++)
4747 setShadow(&
I, Shadow);
4748 setOriginForNaryOp(
I);
4754 void handleRoundPdPsIntrinsic(IntrinsicInst &
I) {
4755 assert(
I.getArgOperand(0)->getType() ==
I.getType());
4760 ShadowAndOriginCombiner SC(
this, IRB);
4761 SC.Add(
I.getArgOperand(0));
4769 void handleAbsIntrinsic(IntrinsicInst &
I) {
4771 Value *Src =
I.getArgOperand(0);
4772 Value *IsIntMinPoison =
I.getArgOperand(1);
4774 assert(
I.getType()->isIntOrIntVectorTy());
4776 assert(Src->getType() ==
I.getType());
4782 Value *SrcShadow = getShadow(Src);
4786 Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
4789 Value *PoisonedShadow = getPoisonedShadow(Src);
4790 Value *PoisonedIfIntMinShadow =
4793 IRB.
CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);
4795 setShadow(&
I, Shadow);
4796 setOrigin(&
I, getOrigin(&
I, 0));
4799 void handleIsFpClass(IntrinsicInst &
I) {
4801 Value *Shadow = getShadow(&
I, 0);
4802 setShadow(&
I, IRB.
CreateICmpNE(Shadow, getCleanShadow(Shadow)));
4803 setOrigin(&
I, getOrigin(&
I, 0));
4806 void handleArithmeticWithOverflow(IntrinsicInst &
I) {
4808 Value *Shadow0 = getShadow(&
I, 0);
4809 Value *Shadow1 = getShadow(&
I, 1);
4812 IRB.
CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
4818 setShadow(&
I, Shadow);
4819 setOriginForNaryOp(
I);
4825 Value *Shadow = getShadow(V);
4847 void handleAVX512VectorDownConvert(IntrinsicInst &
I) {
4852 Value *WriteThrough =
I.getOperand(1);
4856 assert(isFixedIntVector(WriteThrough));
4858 unsigned ANumElements =
4860 unsigned OutputNumElements =
4862 assert(ANumElements == OutputNumElements ||
4863 ANumElements * 2 == OutputNumElements);
4866 assert(
Mask->getType()->getScalarSizeInBits() == ANumElements);
4867 insertCheckShadowOf(Mask, &
I);
4878 if (ANumElements != OutputNumElements) {
4880 Mask = IRB.
CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
4887 Value *AShadow = getShadow(
A);
4891 VectorType *ShadowType = maybeShrinkVectorShadowType(
A,
I);
4901 AShadow = IRB.
CreateTrunc(AShadow, ShadowType,
"_ms_trunc_shadow");
4902 AShadow = maybeExtendVectorShadowWithZeros(AShadow,
I);
4904 Value *WriteThroughShadow = getShadow(WriteThrough);
4907 setShadow(&
I, Shadow);
4908 setOriginForNaryOp(
I);
4935 void handleAVX512VectorGenericMaskedFP(IntrinsicInst &
I,
unsigned AIndex,
4936 unsigned WriteThruIndex,
4937 unsigned MaskIndex) {
4940 unsigned NumArgs =
I.arg_size();
4941 assert(AIndex < NumArgs);
4942 assert(WriteThruIndex < NumArgs);
4943 assert(MaskIndex < NumArgs);
4944 assert(AIndex != WriteThruIndex);
4945 assert(AIndex != MaskIndex);
4946 assert(WriteThruIndex != MaskIndex);
4948 Value *
A =
I.getOperand(AIndex);
4949 Value *WriteThru =
I.getOperand(WriteThruIndex);
4953 assert(isFixedFPVector(WriteThru));
4955 [[maybe_unused]]
unsigned ANumElements =
4957 unsigned OutputNumElements =
4959 assert(ANumElements == OutputNumElements);
4961 for (
unsigned i = 0; i < NumArgs; ++i) {
4962 if (i != AIndex && i != WriteThruIndex) {
4965 assert(
I.getOperand(i)->getType()->isIntegerTy());
4966 insertCheckShadowOf(
I.getOperand(i), &
I);
4971 if (
Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
4973 assert(
Mask->getType()->getScalarSizeInBits() == ANumElements);
4980 Value *AShadow = getShadow(
A);
4986 Value *WriteThruShadow = getShadow(WriteThru);
4989 setShadow(&
I, Shadow);
4991 setOriginForNaryOp(
I);
5001 void visitGenericScalarHalfwordInst(IntrinsicInst &
I) {
5007 Value *WriteThrough =
I.getOperand(2);
5014 insertCheckShadowOf(Mask, &
I);
5018 unsigned NumElements =
5020 assert(NumElements == 8);
5021 assert(
A->getType() ==
B->getType());
5023 assert(
Mask->getType()->getPrimitiveSizeInBits() == NumElements);
5026 Value *ALowerShadow = extractLowerShadow(IRB,
A);
5027 Value *BLowerShadow = extractLowerShadow(IRB,
B);
5029 Value *ABLowerShadow = IRB.
CreateOr(ALowerShadow, BLowerShadow);
5031 Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);
5038 Value *AShadow = getShadow(
A);
5039 Value *DstLowerShadow =
5040 IRB.
CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
5042 AShadow, DstLowerShadow, ConstantInt::get(IRB.
getInt32Ty(), 0),
5045 setShadow(&
I, DstShadow);
5046 setOriginForNaryOp(
I);
5076 void handleAVXGF2P8Affine(IntrinsicInst &
I) {
5087 ->getScalarSizeInBits() == 8);
5089 assert(
A->getType() ==
X->getType());
5091 assert(
B->getType()->isIntegerTy());
5092 assert(
B->getType()->getScalarSizeInBits() == 8);
5094 assert(
I.getType() ==
A->getType());
5096 Value *AShadow = getShadow(
A);
5097 Value *XShadow = getShadow(
X);
5098 Value *BZeroShadow = getCleanShadow(
B);
5101 I.getType(),
I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
5103 {X, AShadow, BZeroShadow});
5105 {XShadow, A, BZeroShadow});
5108 Value *BShadow = getShadow(
B);
5109 Value *BBroadcastShadow = getCleanShadow(AShadow);
5114 for (
unsigned i = 0; i < NumElements; i++)
5118 {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
5119 setOriginForNaryOp(
I);
5133 void handleNEONVectorLoad(IntrinsicInst &
I,
bool WithLane) {
5134 unsigned int numArgs =
I.arg_size();
5137 assert(
I.getType()->isStructTy());
5147 assert(4 <= numArgs && numArgs <= 6);
5161 for (
unsigned int i = 0; i < numArgs - 2; i++)
5162 ShadowArgs.
push_back(getShadow(
I.getArgOperand(i)));
5165 Value *LaneNumber =
I.getArgOperand(numArgs - 2);
5169 insertCheckShadowOf(LaneNumber, &
I);
5172 Value *Src =
I.getArgOperand(numArgs - 1);
5173 assert(Src->getType()->isPointerTy() &&
"Source is not a pointer!");
5175 Type *SrcShadowTy = getShadowTy(Src);
5176 auto [SrcShadowPtr, SrcOriginPtr] =
5177 getShadowOriginPtr(Src, IRB, SrcShadowTy,
Align(1),
false);
5187 if (!MS.TrackOrigins)
5191 setOrigin(&
I, PtrSrcOrigin);
5208 void handleNEONVectorStoreIntrinsic(IntrinsicInst &
I,
bool useLane) {
5212 int numArgOperands =
I.arg_size();
5215 assert(numArgOperands >= 1);
5216 Value *Addr =
I.getArgOperand(numArgOperands - 1);
5218 int skipTrailingOperands = 1;
5221 insertCheckShadowOf(Addr, &
I);
5225 skipTrailingOperands++;
5226 assert(numArgOperands >=
static_cast<int>(skipTrailingOperands));
5228 I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
5231 SmallVector<Value *, 8> ShadowArgs;
5233 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
5235 Value *Shadow = getShadow(&
I, i);
5236 ShadowArgs.
append(1, Shadow);
5253 (numArgOperands - skipTrailingOperands));
5254 Type *OutputShadowTy = getShadowTy(OutputVectorTy);
5258 I.getArgOperand(numArgOperands - skipTrailingOperands));
5260 Value *OutputShadowPtr, *OutputOriginPtr;
5262 std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
5263 Addr, IRB, OutputShadowTy,
Align(1),
true);
5264 ShadowArgs.
append(1, OutputShadowPtr);
5270 if (MS.TrackOrigins) {
5278 OriginCombiner
OC(
this, IRB);
5279 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++)
5280 OC.Add(
I.getArgOperand(i));
5282 const DataLayout &
DL =
F.getDataLayout();
5283 OC.DoneAndStoreOrigin(
DL.getTypeStoreSize(OutputVectorTy),
5310 void handleIntrinsicByApplyingToShadow(IntrinsicInst &
I,
5312 unsigned int trailingVerbatimArgs) {
5315 assert(trailingVerbatimArgs <
I.arg_size());
5317 SmallVector<Value *, 8> ShadowArgs;
5319 for (
unsigned int i = 0; i <
I.arg_size() - trailingVerbatimArgs; i++) {
5320 Value *Shadow = getShadow(&
I, i);
5328 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
5330 Value *Arg =
I.getArgOperand(i);
5336 Value *CombinedShadow = CI;
5339 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
5342 CreateShadowCast(IRB, getShadow(&
I, i), CombinedShadow->
getType());
5343 CombinedShadow = IRB.
CreateOr(Shadow, CombinedShadow,
"_msprop");
5348 setOriginForNaryOp(
I);
5354 void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &
I) {
5360 bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &
I) {
5361 switch (
I.getIntrinsicID()) {
5362 case Intrinsic::uadd_with_overflow:
5363 case Intrinsic::sadd_with_overflow:
5364 case Intrinsic::usub_with_overflow:
5365 case Intrinsic::ssub_with_overflow:
5366 case Intrinsic::umul_with_overflow:
5367 case Intrinsic::smul_with_overflow:
5368 handleArithmeticWithOverflow(
I);
5370 case Intrinsic::abs:
5371 handleAbsIntrinsic(
I);
5373 case Intrinsic::bitreverse:
5374 handleIntrinsicByApplyingToShadow(
I,
I.getIntrinsicID(),
5377 case Intrinsic::is_fpclass:
5380 case Intrinsic::lifetime_start:
5381 handleLifetimeStart(
I);
5383 case Intrinsic::launder_invariant_group:
5384 case Intrinsic::strip_invariant_group:
5385 handleInvariantGroup(
I);
5387 case Intrinsic::bswap:
5390 case Intrinsic::ctlz:
5391 case Intrinsic::cttz:
5392 handleCountLeadingTrailingZeros(
I);
5394 case Intrinsic::masked_compressstore:
5395 handleMaskedCompressStore(
I);
5397 case Intrinsic::masked_expandload:
5398 handleMaskedExpandLoad(
I);
5400 case Intrinsic::masked_gather:
5401 handleMaskedGather(
I);
5403 case Intrinsic::masked_scatter:
5404 handleMaskedScatter(
I);
5406 case Intrinsic::masked_store:
5407 handleMaskedStore(
I);
5409 case Intrinsic::masked_load:
5410 handleMaskedLoad(
I);
5412 case Intrinsic::vector_reduce_and:
5413 handleVectorReduceAndIntrinsic(
I);
5415 case Intrinsic::vector_reduce_or:
5416 handleVectorReduceOrIntrinsic(
I);
5419 case Intrinsic::vector_reduce_add:
5420 case Intrinsic::vector_reduce_xor:
5421 case Intrinsic::vector_reduce_mul:
5424 case Intrinsic::vector_reduce_smax:
5425 case Intrinsic::vector_reduce_smin:
5426 case Intrinsic::vector_reduce_umax:
5427 case Intrinsic::vector_reduce_umin:
5430 case Intrinsic::vector_reduce_fmax:
5431 case Intrinsic::vector_reduce_fmin:
5432 handleVectorReduceIntrinsic(
I,
false);
5435 case Intrinsic::vector_reduce_fadd:
5436 case Intrinsic::vector_reduce_fmul:
5437 handleVectorReduceWithStarterIntrinsic(
I);
5440 case Intrinsic::scmp:
5441 case Intrinsic::ucmp: {
5446 case Intrinsic::fshl:
5447 case Intrinsic::fshr:
5448 handleFunnelShift(
I);
5451 case Intrinsic::is_constant:
5453 setShadow(&
I, getCleanShadow(&
I));
5454 setOrigin(&
I, getCleanOrigin());
5464 bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &
I) {
5465 switch (
I.getIntrinsicID()) {
5466 case Intrinsic::x86_sse_stmxcsr:
5469 case Intrinsic::x86_sse_ldmxcsr:
5476 case Intrinsic::x86_avx512_vcvtsd2usi64:
5477 case Intrinsic::x86_avx512_vcvtsd2usi32:
5478 case Intrinsic::x86_avx512_vcvtss2usi64:
5479 case Intrinsic::x86_avx512_vcvtss2usi32:
5480 case Intrinsic::x86_avx512_cvttss2usi64:
5481 case Intrinsic::x86_avx512_cvttss2usi:
5482 case Intrinsic::x86_avx512_cvttsd2usi64:
5483 case Intrinsic::x86_avx512_cvttsd2usi:
5484 case Intrinsic::x86_avx512_cvtusi2ss:
5485 case Intrinsic::x86_avx512_cvtusi642sd:
5486 case Intrinsic::x86_avx512_cvtusi642ss:
5487 handleSSEVectorConvertIntrinsic(
I, 1,
true);
5489 case Intrinsic::x86_sse2_cvtsd2si64:
5490 case Intrinsic::x86_sse2_cvtsd2si:
5491 case Intrinsic::x86_sse2_cvtsd2ss:
5492 case Intrinsic::x86_sse2_cvttsd2si64:
5493 case Intrinsic::x86_sse2_cvttsd2si:
5494 case Intrinsic::x86_sse_cvtss2si64:
5495 case Intrinsic::x86_sse_cvtss2si:
5496 case Intrinsic::x86_sse_cvttss2si64:
5497 case Intrinsic::x86_sse_cvttss2si:
5498 handleSSEVectorConvertIntrinsic(
I, 1);
5500 case Intrinsic::x86_sse_cvtps2pi:
5501 case Intrinsic::x86_sse_cvttps2pi:
5502 handleSSEVectorConvertIntrinsic(
I, 2);
5510 case Intrinsic::x86_vcvtps2ph_128:
5511 case Intrinsic::x86_vcvtps2ph_256: {
5512 handleSSEVectorConvertIntrinsicByProp(
I,
true);
5521 case Intrinsic::x86_avx512_mask_cvtps2dq_512:
5522 handleAVX512VectorConvertFPToInt(
I,
false);
5527 case Intrinsic::x86_sse2_cvtpd2ps:
5528 case Intrinsic::x86_sse2_cvtps2dq:
5529 case Intrinsic::x86_sse2_cvtpd2dq:
5530 case Intrinsic::x86_sse2_cvttps2dq:
5531 case Intrinsic::x86_sse2_cvttpd2dq:
5532 case Intrinsic::x86_avx_cvt_pd2_ps_256:
5533 case Intrinsic::x86_avx_cvt_ps2dq_256:
5534 case Intrinsic::x86_avx_cvt_pd2dq_256:
5535 case Intrinsic::x86_avx_cvtt_ps2dq_256:
5536 case Intrinsic::x86_avx_cvtt_pd2dq_256: {
5537 handleSSEVectorConvertIntrinsicByProp(
I,
false);
5548 case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
5549 case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
5550 case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
5551 handleAVX512VectorConvertFPToInt(
I,
true);
5555 case Intrinsic::x86_avx512_psll_w_512:
5556 case Intrinsic::x86_avx512_psll_d_512:
5557 case Intrinsic::x86_avx512_psll_q_512:
5558 case Intrinsic::x86_avx512_pslli_w_512:
5559 case Intrinsic::x86_avx512_pslli_d_512:
5560 case Intrinsic::x86_avx512_pslli_q_512:
5561 case Intrinsic::x86_avx512_psrl_w_512:
5562 case Intrinsic::x86_avx512_psrl_d_512:
5563 case Intrinsic::x86_avx512_psrl_q_512:
5564 case Intrinsic::x86_avx512_psra_w_512:
5565 case Intrinsic::x86_avx512_psra_d_512:
5566 case Intrinsic::x86_avx512_psra_q_512:
5567 case Intrinsic::x86_avx512_psrli_w_512:
5568 case Intrinsic::x86_avx512_psrli_d_512:
5569 case Intrinsic::x86_avx512_psrli_q_512:
5570 case Intrinsic::x86_avx512_psrai_w_512:
5571 case Intrinsic::x86_avx512_psrai_d_512:
5572 case Intrinsic::x86_avx512_psrai_q_512:
5573 case Intrinsic::x86_avx512_psra_q_256:
5574 case Intrinsic::x86_avx512_psra_q_128:
5575 case Intrinsic::x86_avx512_psrai_q_256:
5576 case Intrinsic::x86_avx512_psrai_q_128:
5577 case Intrinsic::x86_avx2_psll_w:
5578 case Intrinsic::x86_avx2_psll_d:
5579 case Intrinsic::x86_avx2_psll_q:
5580 case Intrinsic::x86_avx2_pslli_w:
5581 case Intrinsic::x86_avx2_pslli_d:
5582 case Intrinsic::x86_avx2_pslli_q:
5583 case Intrinsic::x86_avx2_psrl_w:
5584 case Intrinsic::x86_avx2_psrl_d:
5585 case Intrinsic::x86_avx2_psrl_q:
5586 case Intrinsic::x86_avx2_psra_w:
5587 case Intrinsic::x86_avx2_psra_d:
5588 case Intrinsic::x86_avx2_psrli_w:
5589 case Intrinsic::x86_avx2_psrli_d:
5590 case Intrinsic::x86_avx2_psrli_q:
5591 case Intrinsic::x86_avx2_psrai_w:
5592 case Intrinsic::x86_avx2_psrai_d:
5593 case Intrinsic::x86_sse2_psll_w:
5594 case Intrinsic::x86_sse2_psll_d:
5595 case Intrinsic::x86_sse2_psll_q:
5596 case Intrinsic::x86_sse2_pslli_w:
5597 case Intrinsic::x86_sse2_pslli_d:
5598 case Intrinsic::x86_sse2_pslli_q:
5599 case Intrinsic::x86_sse2_psrl_w:
5600 case Intrinsic::x86_sse2_psrl_d:
5601 case Intrinsic::x86_sse2_psrl_q:
5602 case Intrinsic::x86_sse2_psra_w:
5603 case Intrinsic::x86_sse2_psra_d:
5604 case Intrinsic::x86_sse2_psrli_w:
5605 case Intrinsic::x86_sse2_psrli_d:
5606 case Intrinsic::x86_sse2_psrli_q:
5607 case Intrinsic::x86_sse2_psrai_w:
5608 case Intrinsic::x86_sse2_psrai_d:
5609 case Intrinsic::x86_mmx_psll_w:
5610 case Intrinsic::x86_mmx_psll_d:
5611 case Intrinsic::x86_mmx_psll_q:
5612 case Intrinsic::x86_mmx_pslli_w:
5613 case Intrinsic::x86_mmx_pslli_d:
5614 case Intrinsic::x86_mmx_pslli_q:
5615 case Intrinsic::x86_mmx_psrl_w:
5616 case Intrinsic::x86_mmx_psrl_d:
5617 case Intrinsic::x86_mmx_psrl_q:
5618 case Intrinsic::x86_mmx_psra_w:
5619 case Intrinsic::x86_mmx_psra_d:
5620 case Intrinsic::x86_mmx_psrli_w:
5621 case Intrinsic::x86_mmx_psrli_d:
5622 case Intrinsic::x86_mmx_psrli_q:
5623 case Intrinsic::x86_mmx_psrai_w:
5624 case Intrinsic::x86_mmx_psrai_d:
5625 handleVectorShiftIntrinsic(
I,
false);
5627 case Intrinsic::x86_avx2_psllv_d:
5628 case Intrinsic::x86_avx2_psllv_d_256:
5629 case Intrinsic::x86_avx512_psllv_d_512:
5630 case Intrinsic::x86_avx2_psllv_q:
5631 case Intrinsic::x86_avx2_psllv_q_256:
5632 case Intrinsic::x86_avx512_psllv_q_512:
5633 case Intrinsic::x86_avx2_psrlv_d:
5634 case Intrinsic::x86_avx2_psrlv_d_256:
5635 case Intrinsic::x86_avx512_psrlv_d_512:
5636 case Intrinsic::x86_avx2_psrlv_q:
5637 case Intrinsic::x86_avx2_psrlv_q_256:
5638 case Intrinsic::x86_avx512_psrlv_q_512:
5639 case Intrinsic::x86_avx2_psrav_d:
5640 case Intrinsic::x86_avx2_psrav_d_256:
5641 case Intrinsic::x86_avx512_psrav_d_512:
5642 case Intrinsic::x86_avx512_psrav_q_128:
5643 case Intrinsic::x86_avx512_psrav_q_256:
5644 case Intrinsic::x86_avx512_psrav_q_512:
5645 handleVectorShiftIntrinsic(
I,
true);
5649 case Intrinsic::x86_sse2_packsswb_128:
5650 case Intrinsic::x86_sse2_packssdw_128:
5651 case Intrinsic::x86_sse2_packuswb_128:
5652 case Intrinsic::x86_sse41_packusdw:
5653 case Intrinsic::x86_avx2_packsswb:
5654 case Intrinsic::x86_avx2_packssdw:
5655 case Intrinsic::x86_avx2_packuswb:
5656 case Intrinsic::x86_avx2_packusdw:
5662 case Intrinsic::x86_avx512_packsswb_512:
5663 case Intrinsic::x86_avx512_packssdw_512:
5664 case Intrinsic::x86_avx512_packuswb_512:
5665 case Intrinsic::x86_avx512_packusdw_512:
5666 handleVectorPackIntrinsic(
I);
5669 case Intrinsic::x86_sse41_pblendvb:
5670 case Intrinsic::x86_sse41_blendvpd:
5671 case Intrinsic::x86_sse41_blendvps:
5672 case Intrinsic::x86_avx_blendv_pd_256:
5673 case Intrinsic::x86_avx_blendv_ps_256:
5674 case Intrinsic::x86_avx2_pblendvb:
5675 handleBlendvIntrinsic(
I);
5678 case Intrinsic::x86_avx_dp_ps_256:
5679 case Intrinsic::x86_sse41_dppd:
5680 case Intrinsic::x86_sse41_dpps:
5681 handleDppIntrinsic(
I);
5684 case Intrinsic::x86_mmx_packsswb:
5685 case Intrinsic::x86_mmx_packuswb:
5686 handleVectorPackIntrinsic(
I, 16);
5689 case Intrinsic::x86_mmx_packssdw:
5690 handleVectorPackIntrinsic(
I, 32);
5693 case Intrinsic::x86_mmx_psad_bw:
5694 handleVectorSadIntrinsic(
I,
true);
5696 case Intrinsic::x86_sse2_psad_bw:
5697 case Intrinsic::x86_avx2_psad_bw:
5698 handleVectorSadIntrinsic(
I);
5724 case Intrinsic::x86_sse2_pmadd_wd:
5725 case Intrinsic::x86_avx2_pmadd_wd:
5726 case Intrinsic::x86_avx512_pmaddw_d_512:
5727 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
5728 case Intrinsic::x86_avx2_pmadd_ub_sw:
5729 case Intrinsic::x86_avx512_pmaddubs_w_512:
5730 handleVectorPmaddIntrinsic(
I, 2);
5734 case Intrinsic::x86_ssse3_pmadd_ub_sw:
5735 handleVectorPmaddIntrinsic(
I, 2, 8);
5739 case Intrinsic::x86_mmx_pmadd_wd:
5740 handleVectorPmaddIntrinsic(
I, 2, 16);
5802 case Intrinsic::x86_avx512_vpdpbusd_128:
5803 case Intrinsic::x86_avx512_vpdpbusd_256:
5804 case Intrinsic::x86_avx512_vpdpbusd_512:
5805 case Intrinsic::x86_avx512_vpdpbusds_128:
5806 case Intrinsic::x86_avx512_vpdpbusds_256:
5807 case Intrinsic::x86_avx512_vpdpbusds_512:
5808 case Intrinsic::x86_avx2_vpdpbssd_128:
5809 case Intrinsic::x86_avx2_vpdpbssd_256:
5810 case Intrinsic::x86_avx10_vpdpbssd_512:
5811 case Intrinsic::x86_avx2_vpdpbssds_128:
5812 case Intrinsic::x86_avx2_vpdpbssds_256:
5813 case Intrinsic::x86_avx10_vpdpbssds_512:
5814 case Intrinsic::x86_avx2_vpdpbsud_128:
5815 case Intrinsic::x86_avx2_vpdpbsud_256:
5816 case Intrinsic::x86_avx10_vpdpbsud_512:
5817 case Intrinsic::x86_avx2_vpdpbsuds_128:
5818 case Intrinsic::x86_avx2_vpdpbsuds_256:
5819 case Intrinsic::x86_avx10_vpdpbsuds_512:
5820 case Intrinsic::x86_avx2_vpdpbuud_128:
5821 case Intrinsic::x86_avx2_vpdpbuud_256:
5822 case Intrinsic::x86_avx10_vpdpbuud_512:
5823 case Intrinsic::x86_avx2_vpdpbuuds_128:
5824 case Intrinsic::x86_avx2_vpdpbuuds_256:
5825 case Intrinsic::x86_avx10_vpdpbuuds_512:
5826 handleVectorPmaddIntrinsic(
I, 4, 8);
5873 case Intrinsic::x86_avx512_vpdpwssd_128:
5874 case Intrinsic::x86_avx512_vpdpwssd_256:
5875 case Intrinsic::x86_avx512_vpdpwssd_512:
5876 case Intrinsic::x86_avx512_vpdpwssds_128:
5877 case Intrinsic::x86_avx512_vpdpwssds_256:
5878 case Intrinsic::x86_avx512_vpdpwssds_512:
5879 handleVectorPmaddIntrinsic(
I, 2, 16);
5892 case Intrinsic::x86_sse_cmp_ss:
5893 case Intrinsic::x86_sse2_cmp_sd:
5894 case Intrinsic::x86_sse_comieq_ss:
5895 case Intrinsic::x86_sse_comilt_ss:
5896 case Intrinsic::x86_sse_comile_ss:
5897 case Intrinsic::x86_sse_comigt_ss:
5898 case Intrinsic::x86_sse_comige_ss:
5899 case Intrinsic::x86_sse_comineq_ss:
5900 case Intrinsic::x86_sse_ucomieq_ss:
5901 case Intrinsic::x86_sse_ucomilt_ss:
5902 case Intrinsic::x86_sse_ucomile_ss:
5903 case Intrinsic::x86_sse_ucomigt_ss:
5904 case Intrinsic::x86_sse_ucomige_ss:
5905 case Intrinsic::x86_sse_ucomineq_ss:
5906 case Intrinsic::x86_sse2_comieq_sd:
5907 case Intrinsic::x86_sse2_comilt_sd:
5908 case Intrinsic::x86_sse2_comile_sd:
5909 case Intrinsic::x86_sse2_comigt_sd:
5910 case Intrinsic::x86_sse2_comige_sd:
5911 case Intrinsic::x86_sse2_comineq_sd:
5912 case Intrinsic::x86_sse2_ucomieq_sd:
5913 case Intrinsic::x86_sse2_ucomilt_sd:
5914 case Intrinsic::x86_sse2_ucomile_sd:
5915 case Intrinsic::x86_sse2_ucomigt_sd:
5916 case Intrinsic::x86_sse2_ucomige_sd:
5917 case Intrinsic::x86_sse2_ucomineq_sd:
5918 handleVectorCompareScalarIntrinsic(
I);
5921 case Intrinsic::x86_avx_cmp_pd_256:
5922 case Intrinsic::x86_avx_cmp_ps_256:
5923 case Intrinsic::x86_sse2_cmp_pd:
5924 case Intrinsic::x86_sse_cmp_ps:
5925 handleVectorComparePackedIntrinsic(
I);
5928 case Intrinsic::x86_bmi_bextr_32:
5929 case Intrinsic::x86_bmi_bextr_64:
5930 case Intrinsic::x86_bmi_bzhi_32:
5931 case Intrinsic::x86_bmi_bzhi_64:
5932 case Intrinsic::x86_bmi_pdep_32:
5933 case Intrinsic::x86_bmi_pdep_64:
5934 case Intrinsic::x86_bmi_pext_32:
5935 case Intrinsic::x86_bmi_pext_64:
5936 handleBmiIntrinsic(
I);
5939 case Intrinsic::x86_pclmulqdq:
5940 case Intrinsic::x86_pclmulqdq_256:
5941 case Intrinsic::x86_pclmulqdq_512:
5942 handlePclmulIntrinsic(
I);
5945 case Intrinsic::x86_avx_round_pd_256:
5946 case Intrinsic::x86_avx_round_ps_256:
5947 case Intrinsic::x86_sse41_round_pd:
5948 case Intrinsic::x86_sse41_round_ps:
5949 handleRoundPdPsIntrinsic(
I);
5952 case Intrinsic::x86_sse41_round_sd:
5953 case Intrinsic::x86_sse41_round_ss:
5954 handleUnarySdSsIntrinsic(
I);
5957 case Intrinsic::x86_sse2_max_sd:
5958 case Intrinsic::x86_sse_max_ss:
5959 case Intrinsic::x86_sse2_min_sd:
5960 case Intrinsic::x86_sse_min_ss:
5961 handleBinarySdSsIntrinsic(
I);
5964 case Intrinsic::x86_avx_vtestc_pd:
5965 case Intrinsic::x86_avx_vtestc_pd_256:
5966 case Intrinsic::x86_avx_vtestc_ps:
5967 case Intrinsic::x86_avx_vtestc_ps_256:
5968 case Intrinsic::x86_avx_vtestnzc_pd:
5969 case Intrinsic::x86_avx_vtestnzc_pd_256:
5970 case Intrinsic::x86_avx_vtestnzc_ps:
5971 case Intrinsic::x86_avx_vtestnzc_ps_256:
5972 case Intrinsic::x86_avx_vtestz_pd:
5973 case Intrinsic::x86_avx_vtestz_pd_256:
5974 case Intrinsic::x86_avx_vtestz_ps:
5975 case Intrinsic::x86_avx_vtestz_ps_256:
5976 case Intrinsic::x86_avx_ptestc_256:
5977 case Intrinsic::x86_avx_ptestnzc_256:
5978 case Intrinsic::x86_avx_ptestz_256:
5979 case Intrinsic::x86_sse41_ptestc:
5980 case Intrinsic::x86_sse41_ptestnzc:
5981 case Intrinsic::x86_sse41_ptestz:
5982 handleVtestIntrinsic(
I);
5986 case Intrinsic::x86_ssse3_phadd_w:
5987 case Intrinsic::x86_ssse3_phadd_w_128:
5988 case Intrinsic::x86_avx2_phadd_w:
5989 case Intrinsic::x86_ssse3_phsub_w:
5990 case Intrinsic::x86_ssse3_phsub_w_128:
5991 case Intrinsic::x86_avx2_phsub_w: {
5992 handlePairwiseShadowOrIntrinsic(
I, 16);
5997 case Intrinsic::x86_ssse3_phadd_d:
5998 case Intrinsic::x86_ssse3_phadd_d_128:
5999 case Intrinsic::x86_avx2_phadd_d:
6000 case Intrinsic::x86_ssse3_phsub_d:
6001 case Intrinsic::x86_ssse3_phsub_d_128:
6002 case Intrinsic::x86_avx2_phsub_d: {
6003 handlePairwiseShadowOrIntrinsic(
I, 32);
6008 case Intrinsic::x86_ssse3_phadd_sw:
6009 case Intrinsic::x86_ssse3_phadd_sw_128:
6010 case Intrinsic::x86_avx2_phadd_sw:
6011 case Intrinsic::x86_ssse3_phsub_sw:
6012 case Intrinsic::x86_ssse3_phsub_sw_128:
6013 case Intrinsic::x86_avx2_phsub_sw: {
6014 handlePairwiseShadowOrIntrinsic(
I, 16);
6019 case Intrinsic::x86_sse3_hadd_ps:
6020 case Intrinsic::x86_sse3_hadd_pd:
6021 case Intrinsic::x86_avx_hadd_pd_256:
6022 case Intrinsic::x86_avx_hadd_ps_256:
6023 case Intrinsic::x86_sse3_hsub_ps:
6024 case Intrinsic::x86_sse3_hsub_pd:
6025 case Intrinsic::x86_avx_hsub_pd_256:
6026 case Intrinsic::x86_avx_hsub_ps_256: {
6027 handlePairwiseShadowOrIntrinsic(
I);
6031 case Intrinsic::x86_avx_maskstore_ps:
6032 case Intrinsic::x86_avx_maskstore_pd:
6033 case Intrinsic::x86_avx_maskstore_ps_256:
6034 case Intrinsic::x86_avx_maskstore_pd_256:
6035 case Intrinsic::x86_avx2_maskstore_d:
6036 case Intrinsic::x86_avx2_maskstore_q:
6037 case Intrinsic::x86_avx2_maskstore_d_256:
6038 case Intrinsic::x86_avx2_maskstore_q_256: {
6039 handleAVXMaskedStore(
I);
6043 case Intrinsic::x86_avx_maskload_ps:
6044 case Intrinsic::x86_avx_maskload_pd:
6045 case Intrinsic::x86_avx_maskload_ps_256:
6046 case Intrinsic::x86_avx_maskload_pd_256:
6047 case Intrinsic::x86_avx2_maskload_d:
6048 case Intrinsic::x86_avx2_maskload_q:
6049 case Intrinsic::x86_avx2_maskload_d_256:
6050 case Intrinsic::x86_avx2_maskload_q_256: {
6051 handleAVXMaskedLoad(
I);
6056 case Intrinsic::x86_avx512fp16_add_ph_512:
6057 case Intrinsic::x86_avx512fp16_sub_ph_512:
6058 case Intrinsic::x86_avx512fp16_mul_ph_512:
6059 case Intrinsic::x86_avx512fp16_div_ph_512:
6060 case Intrinsic::x86_avx512fp16_max_ph_512:
6061 case Intrinsic::x86_avx512fp16_min_ph_512:
6062 case Intrinsic::x86_avx512_min_ps_512:
6063 case Intrinsic::x86_avx512_min_pd_512:
6064 case Intrinsic::x86_avx512_max_ps_512:
6065 case Intrinsic::x86_avx512_max_pd_512: {
6070 [[maybe_unused]]
bool Success =
6071 maybeHandleSimpleNomemIntrinsic(
I, 1);
6076 case Intrinsic::x86_avx_vpermilvar_pd:
6077 case Intrinsic::x86_avx_vpermilvar_pd_256:
6078 case Intrinsic::x86_avx512_vpermilvar_pd_512:
6079 case Intrinsic::x86_avx_vpermilvar_ps:
6080 case Intrinsic::x86_avx_vpermilvar_ps_256:
6081 case Intrinsic::x86_avx512_vpermilvar_ps_512: {
6082 handleAVXVpermilvar(
I);
6086 case Intrinsic::x86_avx512_vpermi2var_d_128:
6087 case Intrinsic::x86_avx512_vpermi2var_d_256:
6088 case Intrinsic::x86_avx512_vpermi2var_d_512:
6089 case Intrinsic::x86_avx512_vpermi2var_hi_128:
6090 case Intrinsic::x86_avx512_vpermi2var_hi_256:
6091 case Intrinsic::x86_avx512_vpermi2var_hi_512:
6092 case Intrinsic::x86_avx512_vpermi2var_pd_128:
6093 case Intrinsic::x86_avx512_vpermi2var_pd_256:
6094 case Intrinsic::x86_avx512_vpermi2var_pd_512:
6095 case Intrinsic::x86_avx512_vpermi2var_ps_128:
6096 case Intrinsic::x86_avx512_vpermi2var_ps_256:
6097 case Intrinsic::x86_avx512_vpermi2var_ps_512:
6098 case Intrinsic::x86_avx512_vpermi2var_q_128:
6099 case Intrinsic::x86_avx512_vpermi2var_q_256:
6100 case Intrinsic::x86_avx512_vpermi2var_q_512:
6101 case Intrinsic::x86_avx512_vpermi2var_qi_128:
6102 case Intrinsic::x86_avx512_vpermi2var_qi_256:
6103 case Intrinsic::x86_avx512_vpermi2var_qi_512:
6104 handleAVXVpermi2var(
I);
6118 case Intrinsic::x86_avx2_pshuf_b:
6119 case Intrinsic::x86_sse_pshuf_w:
6120 case Intrinsic::x86_ssse3_pshuf_b_128:
6121 case Intrinsic::x86_ssse3_pshuf_b:
6122 case Intrinsic::x86_avx512_pshuf_b_512:
6123 handleIntrinsicByApplyingToShadow(
I,
I.getIntrinsicID(),
6129 case Intrinsic::x86_avx512_mask_pmov_dw_512:
6130 case Intrinsic::x86_avx512_mask_pmov_db_512:
6131 case Intrinsic::x86_avx512_mask_pmov_qb_512:
6132 case Intrinsic::x86_avx512_mask_pmov_qw_512: {
6135 handleIntrinsicByApplyingToShadow(
I,
I.getIntrinsicID(),
6143 case Intrinsic::x86_avx512_mask_pmovs_dw_512:
6144 case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
6145 handleIntrinsicByApplyingToShadow(
I,
6146 Intrinsic::x86_avx512_mask_pmov_dw_512,
6151 case Intrinsic::x86_avx512_mask_pmovs_db_512:
6152 case Intrinsic::x86_avx512_mask_pmovus_db_512: {
6153 handleIntrinsicByApplyingToShadow(
I,
6154 Intrinsic::x86_avx512_mask_pmov_db_512,
6159 case Intrinsic::x86_avx512_mask_pmovs_qb_512:
6160 case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
6161 handleIntrinsicByApplyingToShadow(
I,
6162 Intrinsic::x86_avx512_mask_pmov_qb_512,
6167 case Intrinsic::x86_avx512_mask_pmovs_qw_512:
6168 case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
6169 handleIntrinsicByApplyingToShadow(
I,
6170 Intrinsic::x86_avx512_mask_pmov_qw_512,
6175 case Intrinsic::x86_avx512_mask_pmovs_qd_512:
6176 case Intrinsic::x86_avx512_mask_pmovus_qd_512:
6177 case Intrinsic::x86_avx512_mask_pmovs_wb_512:
6178 case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
6182 handleAVX512VectorDownConvert(
I);
6222 case Intrinsic::x86_avx512_rsqrt14_ps_512:
6223 case Intrinsic::x86_avx512_rsqrt14_ps_256:
6224 case Intrinsic::x86_avx512_rsqrt14_ps_128:
6225 case Intrinsic::x86_avx512_rsqrt14_pd_512:
6226 case Intrinsic::x86_avx512_rsqrt14_pd_256:
6227 case Intrinsic::x86_avx512_rsqrt14_pd_128:
6228 case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
6229 case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
6230 case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
6231 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
6232 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
6233 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
6234 handleAVX512VectorGenericMaskedFP(
I, 0, 1,
6274 case Intrinsic::x86_avx512_rcp14_ps_512:
6275 case Intrinsic::x86_avx512_rcp14_ps_256:
6276 case Intrinsic::x86_avx512_rcp14_ps_128:
6277 case Intrinsic::x86_avx512_rcp14_pd_512:
6278 case Intrinsic::x86_avx512_rcp14_pd_256:
6279 case Intrinsic::x86_avx512_rcp14_pd_128:
6280 case Intrinsic::x86_avx10_mask_rcp_bf16_512:
6281 case Intrinsic::x86_avx10_mask_rcp_bf16_256:
6282 case Intrinsic::x86_avx10_mask_rcp_bf16_128:
6283 case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
6284 case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
6285 case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
6286 handleAVX512VectorGenericMaskedFP(
I, 0, 1,
6330 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
6331 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
6332 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
6333 case Intrinsic::x86_avx512_mask_rndscale_ps_512:
6334 case Intrinsic::x86_avx512_mask_rndscale_ps_256:
6335 case Intrinsic::x86_avx512_mask_rndscale_ps_128:
6336 case Intrinsic::x86_avx512_mask_rndscale_pd_512:
6337 case Intrinsic::x86_avx512_mask_rndscale_pd_256:
6338 case Intrinsic::x86_avx512_mask_rndscale_pd_128:
6339 case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
6340 case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
6341 case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
6342 handleAVX512VectorGenericMaskedFP(
I, 0, 2,
6347 case Intrinsic::x86_avx512fp16_mask_add_sh_round:
6348 case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
6349 case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
6350 case Intrinsic::x86_avx512fp16_mask_div_sh_round:
6351 case Intrinsic::x86_avx512fp16_mask_max_sh_round:
6352 case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
6353 visitGenericScalarHalfwordInst(
I);
6358 case Intrinsic::x86_vgf2p8affineqb_128:
6359 case Intrinsic::x86_vgf2p8affineqb_256:
6360 case Intrinsic::x86_vgf2p8affineqb_512:
6361 handleAVXGF2P8Affine(
I);
6371 bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &
I) {
6372 switch (
I.getIntrinsicID()) {
6373 case Intrinsic::aarch64_neon_rshrn:
6374 case Intrinsic::aarch64_neon_sqrshl:
6375 case Intrinsic::aarch64_neon_sqrshrn:
6376 case Intrinsic::aarch64_neon_sqrshrun:
6377 case Intrinsic::aarch64_neon_sqshl:
6378 case Intrinsic::aarch64_neon_sqshlu:
6379 case Intrinsic::aarch64_neon_sqshrn:
6380 case Intrinsic::aarch64_neon_sqshrun:
6381 case Intrinsic::aarch64_neon_srshl:
6382 case Intrinsic::aarch64_neon_sshl:
6383 case Intrinsic::aarch64_neon_uqrshl:
6384 case Intrinsic::aarch64_neon_uqrshrn:
6385 case Intrinsic::aarch64_neon_uqshl:
6386 case Intrinsic::aarch64_neon_uqshrn:
6387 case Intrinsic::aarch64_neon_urshl:
6388 case Intrinsic::aarch64_neon_ushl:
6390 handleVectorShiftIntrinsic(
I,
false);
6395 case Intrinsic::aarch64_neon_fmaxp:
6396 case Intrinsic::aarch64_neon_fminp:
6398 case Intrinsic::aarch64_neon_fmaxnmp:
6399 case Intrinsic::aarch64_neon_fminnmp:
6401 case Intrinsic::aarch64_neon_smaxp:
6402 case Intrinsic::aarch64_neon_sminp:
6403 case Intrinsic::aarch64_neon_umaxp:
6404 case Intrinsic::aarch64_neon_uminp:
6406 case Intrinsic::aarch64_neon_addp:
6408 case Intrinsic::aarch64_neon_faddp:
6410 case Intrinsic::aarch64_neon_saddlp:
6411 case Intrinsic::aarch64_neon_uaddlp: {
6412 handlePairwiseShadowOrIntrinsic(
I);
6417 case Intrinsic::aarch64_neon_fcvtas:
6418 case Intrinsic::aarch64_neon_fcvtau:
6420 case Intrinsic::aarch64_neon_fcvtms:
6421 case Intrinsic::aarch64_neon_fcvtmu:
6423 case Intrinsic::aarch64_neon_fcvtns:
6424 case Intrinsic::aarch64_neon_fcvtnu:
6426 case Intrinsic::aarch64_neon_fcvtps:
6427 case Intrinsic::aarch64_neon_fcvtpu:
6429 case Intrinsic::aarch64_neon_fcvtzs:
6430 case Intrinsic::aarch64_neon_fcvtzu:
6432 case Intrinsic::aarch64_neon_fcvtxn: {
6433 handleNEONVectorConvertIntrinsic(
I);
6438 case Intrinsic::aarch64_neon_faddv:
6439 case Intrinsic::aarch64_neon_saddv:
6440 case Intrinsic::aarch64_neon_uaddv:
6443 case Intrinsic::aarch64_neon_smaxv:
6444 case Intrinsic::aarch64_neon_sminv:
6445 case Intrinsic::aarch64_neon_umaxv:
6446 case Intrinsic::aarch64_neon_uminv:
6450 case Intrinsic::aarch64_neon_fmaxv:
6451 case Intrinsic::aarch64_neon_fminv:
6452 case Intrinsic::aarch64_neon_fmaxnmv:
6453 case Intrinsic::aarch64_neon_fminnmv:
6455 case Intrinsic::aarch64_neon_saddlv:
6456 case Intrinsic::aarch64_neon_uaddlv:
6457 handleVectorReduceIntrinsic(
I,
true);
6460 case Intrinsic::aarch64_neon_ld1x2:
6461 case Intrinsic::aarch64_neon_ld1x3:
6462 case Intrinsic::aarch64_neon_ld1x4:
6463 case Intrinsic::aarch64_neon_ld2:
6464 case Intrinsic::aarch64_neon_ld3:
6465 case Intrinsic::aarch64_neon_ld4:
6466 case Intrinsic::aarch64_neon_ld2r:
6467 case Intrinsic::aarch64_neon_ld3r:
6468 case Intrinsic::aarch64_neon_ld4r: {
6469 handleNEONVectorLoad(
I,
false);
6473 case Intrinsic::aarch64_neon_ld2lane:
6474 case Intrinsic::aarch64_neon_ld3lane:
6475 case Intrinsic::aarch64_neon_ld4lane: {
6476 handleNEONVectorLoad(
I,
true);
6481 case Intrinsic::aarch64_neon_sqxtn:
6482 case Intrinsic::aarch64_neon_sqxtun:
6483 case Intrinsic::aarch64_neon_uqxtn:
6490 case Intrinsic::aarch64_neon_st1x2:
6491 case Intrinsic::aarch64_neon_st1x3:
6492 case Intrinsic::aarch64_neon_st1x4:
6493 case Intrinsic::aarch64_neon_st2:
6494 case Intrinsic::aarch64_neon_st3:
6495 case Intrinsic::aarch64_neon_st4: {
6496 handleNEONVectorStoreIntrinsic(
I,
false);
6500 case Intrinsic::aarch64_neon_st2lane:
6501 case Intrinsic::aarch64_neon_st3lane:
6502 case Intrinsic::aarch64_neon_st4lane: {
6503 handleNEONVectorStoreIntrinsic(
I,
true);
6516 case Intrinsic::aarch64_neon_tbl1:
6517 case Intrinsic::aarch64_neon_tbl2:
6518 case Intrinsic::aarch64_neon_tbl3:
6519 case Intrinsic::aarch64_neon_tbl4:
6520 case Intrinsic::aarch64_neon_tbx1:
6521 case Intrinsic::aarch64_neon_tbx2:
6522 case Intrinsic::aarch64_neon_tbx3:
6523 case Intrinsic::aarch64_neon_tbx4: {
6525 handleIntrinsicByApplyingToShadow(
6526 I,
I.getIntrinsicID(),
6531 case Intrinsic::aarch64_neon_fmulx:
6532 case Intrinsic::aarch64_neon_pmul:
6533 case Intrinsic::aarch64_neon_pmull:
6534 case Intrinsic::aarch64_neon_smull:
6535 case Intrinsic::aarch64_neon_pmull64:
6536 case Intrinsic::aarch64_neon_umull: {
6537 handleNEONVectorMultiplyIntrinsic(
I);
6548 void visitIntrinsicInst(IntrinsicInst &
I) {
6549 if (maybeHandleCrossPlatformIntrinsic(
I))
6552 if (maybeHandleX86SIMDIntrinsic(
I))
6555 if (maybeHandleArmSIMDIntrinsic(
I))
6558 if (maybeHandleUnknownIntrinsic(
I))
6561 visitInstruction(
I);
6564 void visitLibAtomicLoad(CallBase &CB) {
6575 Value *NewOrdering =
6579 NextNodeIRBuilder NextIRB(&CB);
6580 Value *SrcShadowPtr, *SrcOriginPtr;
6581 std::tie(SrcShadowPtr, SrcOriginPtr) =
6582 getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
6584 Value *DstShadowPtr =
6585 getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
6589 NextIRB.CreateMemCpy(DstShadowPtr,
Align(1), SrcShadowPtr,
Align(1),
Size);
6590 if (MS.TrackOrigins) {
6591 Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
6593 Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
6594 NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
6598 void visitLibAtomicStore(CallBase &CB) {
6605 Value *NewOrdering =
6609 Value *DstShadowPtr =
6619 void visitCallBase(CallBase &CB) {
6627 visitAsmInstruction(CB);
6629 visitInstruction(CB);
6638 case LibFunc_atomic_load:
6640 llvm::errs() <<
"MSAN -- cannot instrument invoke of libatomic load."
6644 visitLibAtomicLoad(CB);
6646 case LibFunc_atomic_store:
6647 visitLibAtomicStore(CB);
6663 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
6667 Func->removeFnAttrs(
B);
6673 bool MayCheckCall = MS.EagerChecks;
6677 MayCheckCall &= !
Func->getName().starts_with(
"__sanitizer_unaligned_");
6680 unsigned ArgOffset = 0;
6683 if (!
A->getType()->isSized()) {
6684 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is not sized: " << CB <<
"\n");
6688 if (
A->getType()->isScalableTy()) {
6689 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is vscale: " << CB <<
"\n");
6691 insertCheckShadowOf(
A, &CB);
6696 const DataLayout &
DL =
F.getDataLayout();
6700 bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
6703 insertCheckShadowOf(
A, &CB);
6704 Size =
DL.getTypeAllocSize(
A->getType());
6710 Value *ArgShadow = getShadow(
A);
6711 Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
6713 <<
" Shadow: " << *ArgShadow <<
"\n");
6717 assert(
A->getType()->isPointerTy() &&
6718 "ByVal argument is not a pointer!");
6723 MaybeAlign Alignment = std::nullopt;
6726 Value *AShadowPtr, *AOriginPtr;
6727 std::tie(AShadowPtr, AOriginPtr) =
6728 getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(), Alignment,
6730 if (!PropagateShadow) {
6737 if (MS.TrackOrigins) {
6738 Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
6752 Size =
DL.getTypeAllocSize(
A->getType());
6758 if (MS.TrackOrigins && !(Cst && Cst->
isNullValue())) {
6760 getOriginPtrForArgument(IRB, ArgOffset));
6763 assert(Store !=
nullptr);
6772 if (FT->isVarArg()) {
6773 VAHelper->visitCallBase(CB, IRB);
6783 if (MayCheckCall && CB.
hasRetAttr(Attribute::NoUndef)) {
6784 setShadow(&CB, getCleanShadow(&CB));
6785 setOrigin(&CB, getCleanOrigin());
6791 Value *
Base = getShadowPtrForRetval(IRBBefore);
6792 IRBBefore.CreateAlignedStore(getCleanShadow(&CB),
Base,
6804 setShadow(&CB, getCleanShadow(&CB));
6805 setOrigin(&CB, getCleanOrigin());
6812 "Could not find insertion point for retval shadow load");
6815 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
6818 setShadow(&CB, RetvalShadow);
6819 if (MS.TrackOrigins)
6820 setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
6825 RetVal =
I->getOperand(0);
6828 return I->isMustTailCall();
6833 void visitReturnInst(ReturnInst &
I) {
6835 Value *RetVal =
I.getReturnValue();
6841 Value *ShadowPtr = getShadowPtrForRetval(IRB);
6842 bool HasNoUndef =
F.hasRetAttribute(Attribute::NoUndef);
6843 bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
6846 bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (
F.getName() ==
"main");
6848 Value *Shadow = getShadow(RetVal);
6849 bool StoreOrigin =
true;
6851 insertCheckShadowOf(RetVal, &
I);
6852 Shadow = getCleanShadow(RetVal);
6853 StoreOrigin =
false;
6860 if (MS.TrackOrigins && StoreOrigin)
6861 IRB.
CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
6865 void visitPHINode(PHINode &
I) {
6867 if (!PropagateShadow) {
6868 setShadow(&
I, getCleanShadow(&
I));
6869 setOrigin(&
I, getCleanOrigin());
6873 ShadowPHINodes.push_back(&
I);
6874 setShadow(&
I, IRB.
CreatePHI(getShadowTy(&
I),
I.getNumIncomingValues(),
6876 if (MS.TrackOrigins)
6878 &
I, IRB.
CreatePHI(MS.OriginTy,
I.getNumIncomingValues(),
"_msphi_o"));
6881 Value *getLocalVarIdptr(AllocaInst &
I) {
6882 ConstantInt *IntConst =
6883 ConstantInt::get(Type::getInt32Ty((*
F.getParent()).getContext()), 0);
6884 return new GlobalVariable(*
F.getParent(), IntConst->
getType(),
6889 Value *getLocalVarDescription(AllocaInst &
I) {
6895 IRB.
CreateCall(MS.MsanPoisonStackFn, {&I, Len});
6897 Value *ShadowBase, *OriginBase;
6898 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
6902 IRB.
CreateMemSet(ShadowBase, PoisonValue, Len,
I.getAlign());
6905 if (PoisonStack && MS.TrackOrigins) {
6906 Value *Idptr = getLocalVarIdptr(
I);
6908 Value *Descr = getLocalVarDescription(
I);
6909 IRB.
CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
6910 {&I, Len, Idptr, Descr});
6912 IRB.
CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
6918 Value *Descr = getLocalVarDescription(
I);
6920 IRB.
CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
6922 IRB.
CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
6926 void instrumentAlloca(AllocaInst &
I, Instruction *InsPoint =
nullptr) {
6929 NextNodeIRBuilder IRB(InsPoint);
6930 const DataLayout &
DL =
F.getDataLayout();
6931 TypeSize TS =
DL.getTypeAllocSize(
I.getAllocatedType());
6933 if (
I.isArrayAllocation())
6937 if (MS.CompileKernel)
6938 poisonAllocaKmsan(
I, IRB, Len);
6940 poisonAllocaUserspace(
I, IRB, Len);
6943 void visitAllocaInst(AllocaInst &
I) {
6944 setShadow(&
I, getCleanShadow(&
I));
6945 setOrigin(&
I, getCleanOrigin());
6951 void visitSelectInst(SelectInst &
I) {
6957 handleSelectLikeInst(
I,
B,
C,
D);
6963 Value *Sb = getShadow(
B);
6964 Value *Sc = getShadow(
C);
6965 Value *Sd = getShadow(
D);
6967 Value *Ob = MS.TrackOrigins ? getOrigin(
B) : nullptr;
6968 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
6969 Value *Od = MS.TrackOrigins ? getOrigin(
D) : nullptr;
6974 if (
I.getType()->isAggregateType()) {
6978 Sa1 = getPoisonedShadow(getShadowTy(
I.getType()));
6986 C = CreateAppToShadowCast(IRB,
C);
6987 D = CreateAppToShadowCast(IRB,
D);
6994 if (MS.TrackOrigins) {
6997 if (
B->getType()->isVectorTy()) {
6998 B = convertToBool(
B, IRB);
6999 Sb = convertToBool(Sb, IRB);
7007 void visitLandingPadInst(LandingPadInst &
I) {
7010 setShadow(&
I, getCleanShadow(&
I));
7011 setOrigin(&
I, getCleanOrigin());
7014 void visitCatchSwitchInst(CatchSwitchInst &
I) {
7015 setShadow(&
I, getCleanShadow(&
I));
7016 setOrigin(&
I, getCleanOrigin());
7019 void visitFuncletPadInst(FuncletPadInst &
I) {
7020 setShadow(&
I, getCleanShadow(&
I));
7021 setOrigin(&
I, getCleanOrigin());
7024 void visitGetElementPtrInst(GetElementPtrInst &
I) { handleShadowOr(
I); }
7026 void visitExtractValueInst(ExtractValueInst &
I) {
7028 Value *Agg =
I.getAggregateOperand();
7030 Value *AggShadow = getShadow(Agg);
7034 setShadow(&
I, ResShadow);
7035 setOriginForNaryOp(
I);
7038 void visitInsertValueInst(InsertValueInst &
I) {
7041 Value *AggShadow = getShadow(
I.getAggregateOperand());
7042 Value *InsShadow = getShadow(
I.getInsertedValueOperand());
7048 setOriginForNaryOp(
I);
7051 void dumpInst(Instruction &
I) {
7055 errs() <<
"ZZZ " <<
I.getOpcodeName() <<
"\n";
7057 errs() <<
"QQQ " <<
I <<
"\n";
7060 void visitResumeInst(ResumeInst &
I) {
7065 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
7070 void visitCatchReturnInst(CatchReturnInst &CRI) {
7075 void instrumentAsmArgument(
Value *Operand,
Type *ElemTy, Instruction &
I,
7084 insertCheckShadowOf(Operand, &
I);
7091 auto Size =
DL.getTypeStoreSize(ElemTy);
7093 if (MS.CompileKernel) {
7094 IRB.
CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
7100 auto [ShadowPtr,
_] =
7101 getShadowOriginPtrUserspace(Operand, IRB, IRB.
getInt8Ty(),
Align(1));
7111 int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
7112 int NumRetOutputs = 0;
7119 NumRetOutputs =
ST->getNumElements();
7124 for (
const InlineAsm::ConstraintInfo &
Info : Constraints) {
7125 switch (
Info.Type) {
7133 return NumOutputs - NumRetOutputs;
7136 void visitAsmInstruction(Instruction &
I) {
7152 const DataLayout &
DL =
F.getDataLayout();
7156 int OutputArgs = getNumOutputArgs(IA, CB);
7162 for (
int i = OutputArgs; i < NumOperands; i++) {
7170 for (
int i = 0; i < OutputArgs; i++) {
7176 setShadow(&
I, getCleanShadow(&
I));
7177 setOrigin(&
I, getCleanOrigin());
7180 void visitFreezeInst(FreezeInst &
I) {
7182 setShadow(&
I, getCleanShadow(&
I));
7183 setOrigin(&
I, getCleanOrigin());
7186 void visitInstruction(Instruction &
I) {
7191 for (
size_t i = 0, n =
I.getNumOperands(); i < n; i++) {
7192 Value *Operand =
I.getOperand(i);
7194 insertCheckShadowOf(Operand, &
I);
7196 setShadow(&
I, getCleanShadow(&
I));
7197 setOrigin(&
I, getCleanOrigin());
7201struct VarArgHelperBase :
public VarArgHelper {
7203 MemorySanitizer &MS;
7204 MemorySanitizerVisitor &MSV;
7206 const unsigned VAListTagSize;
7208 VarArgHelperBase(Function &
F, MemorySanitizer &MS,
7209 MemorySanitizerVisitor &MSV,
unsigned VAListTagSize)
7210 :
F(
F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
7214 return IRB.
CreateAdd(
Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
7220 MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset),
"_msarg_va_s");
7229 return getShadowPtrForVAArgument(IRB, ArgOffset);
7238 ConstantInt::get(MS.IntptrTy, ArgOffset),
7243 unsigned BaseOffset) {
7252 TailSize,
Align(8));
7255 void unpoisonVAListTagForInst(IntrinsicInst &
I) {
7257 Value *VAListTag =
I.getArgOperand(0);
7259 auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
7260 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
7263 VAListTagSize, Alignment,
false);
7266 void visitVAStartInst(VAStartInst &
I)
override {
7267 if (
F.getCallingConv() == CallingConv::Win64)
7270 unpoisonVAListTagForInst(
I);
7273 void visitVACopyInst(VACopyInst &
I)
override {
7274 if (
F.getCallingConv() == CallingConv::Win64)
7276 unpoisonVAListTagForInst(
I);
7281struct VarArgAMD64Helper :
public VarArgHelperBase {
7284 static const unsigned AMD64GpEndOffset = 48;
7285 static const unsigned AMD64FpEndOffsetSSE = 176;
7287 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
7289 unsigned AMD64FpEndOffset;
7290 AllocaInst *VAArgTLSCopy =
nullptr;
7291 AllocaInst *VAArgTLSOriginCopy =
nullptr;
7292 Value *VAArgOverflowSize =
nullptr;
7294 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
7296 VarArgAMD64Helper(Function &
F, MemorySanitizer &MS,
7297 MemorySanitizerVisitor &MSV)
7298 : VarArgHelperBase(
F, MS, MSV, 24) {
7299 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
7300 for (
const auto &Attr :
F.getAttributes().getFnAttrs()) {
7301 if (Attr.isStringAttribute() &&
7302 (Attr.getKindAsString() ==
"target-features")) {
7303 if (Attr.getValueAsString().contains(
"-sse"))
7304 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
7310 ArgKind classifyArgument(
Value *arg) {
7313 if (
T->isX86_FP80Ty())
7315 if (
T->isFPOrFPVectorTy())
7316 return AK_FloatingPoint;
7317 if (
T->isIntegerTy() &&
T->getPrimitiveSizeInBits() <= 64)
7318 return AK_GeneralPurpose;
7319 if (
T->isPointerTy())
7320 return AK_GeneralPurpose;
7332 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7333 unsigned GpOffset = 0;
7334 unsigned FpOffset = AMD64GpEndOffset;
7335 unsigned OverflowOffset = AMD64FpEndOffset;
7336 const DataLayout &
DL =
F.getDataLayout();
7340 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
7347 assert(
A->getType()->isPointerTy());
7349 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
7350 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7351 unsigned BaseOffset = OverflowOffset;
7352 Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7353 Value *OriginBase =
nullptr;
7354 if (MS.TrackOrigins)
7355 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7356 OverflowOffset += AlignedSize;
7359 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7363 Value *ShadowPtr, *OriginPtr;
7364 std::tie(ShadowPtr, OriginPtr) =
7369 if (MS.TrackOrigins)
7373 ArgKind AK = classifyArgument(
A);
7374 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
7376 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
7378 Value *ShadowBase, *OriginBase =
nullptr;
7380 case AK_GeneralPurpose:
7381 ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
7382 if (MS.TrackOrigins)
7383 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
7387 case AK_FloatingPoint:
7388 ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
7389 if (MS.TrackOrigins)
7390 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
7397 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
7398 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7399 unsigned BaseOffset = OverflowOffset;
7400 ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7401 if (MS.TrackOrigins) {
7402 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7404 OverflowOffset += AlignedSize;
7407 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7416 Value *Shadow = MSV.getShadow(
A);
7418 if (MS.TrackOrigins) {
7419 Value *Origin = MSV.getOrigin(
A);
7420 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
7421 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
7427 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
7428 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
7431 void finalizeInstrumentation()
override {
7432 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
7433 "finalizeInstrumentation called twice");
7434 if (!VAStartInstrumentationList.
empty()) {
7441 ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
7442 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7448 Intrinsic::umin, CopySize,
7452 if (MS.TrackOrigins) {
7453 VAArgTLSOriginCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7462 for (CallInst *OrigInst : VAStartInstrumentationList) {
7463 NextNodeIRBuilder IRB(OrigInst);
7464 Value *VAListTag = OrigInst->getArgOperand(0);
7466 Value *RegSaveAreaPtrPtr =
7467 IRB.
CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
7469 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7471 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7472 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
7474 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
7476 if (MS.TrackOrigins)
7477 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
7478 Alignment, AMD64FpEndOffset);
7479 Value *OverflowArgAreaPtrPtr =
7480 IRB.
CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
7481 Value *OverflowArgAreaPtr =
7482 IRB.
CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
7483 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
7484 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
7485 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
7489 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
7491 if (MS.TrackOrigins) {
7494 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
7502struct VarArgAArch64Helper :
public VarArgHelperBase {
7503 static const unsigned kAArch64GrArgSize = 64;
7504 static const unsigned kAArch64VrArgSize = 128;
7506 static const unsigned AArch64GrBegOffset = 0;
7507 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
7509 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
7510 static const unsigned AArch64VrEndOffset =
7511 AArch64VrBegOffset + kAArch64VrArgSize;
7512 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
7514 AllocaInst *VAArgTLSCopy =
nullptr;
7515 Value *VAArgOverflowSize =
nullptr;
7517 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
7519 VarArgAArch64Helper(Function &
F, MemorySanitizer &MS,
7520 MemorySanitizerVisitor &MSV)
7521 : VarArgHelperBase(
F, MS, MSV, 32) {}
7524 std::pair<ArgKind, uint64_t> classifyArgument(
Type *
T) {
7525 if (
T->isIntOrPtrTy() &&
T->getPrimitiveSizeInBits() <= 64)
7526 return {AK_GeneralPurpose, 1};
7527 if (
T->isFloatingPointTy() &&
T->getPrimitiveSizeInBits() <= 128)
7528 return {AK_FloatingPoint, 1};
7530 if (
T->isArrayTy()) {
7531 auto R = classifyArgument(
T->getArrayElementType());
7532 R.second *=
T->getScalarType()->getArrayNumElements();
7537 auto R = classifyArgument(FV->getScalarType());
7538 R.second *= FV->getNumElements();
7543 return {AK_Memory, 0};
7555 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7556 unsigned GrOffset = AArch64GrBegOffset;
7557 unsigned VrOffset = AArch64VrBegOffset;
7558 unsigned OverflowOffset = AArch64VAEndOffset;
7560 const DataLayout &
DL =
F.getDataLayout();
7563 auto [AK, RegNum] = classifyArgument(
A->getType());
7564 if (AK == AK_GeneralPurpose &&
7565 (GrOffset + RegNum * 8) > AArch64GrEndOffset)
7567 if (AK == AK_FloatingPoint &&
7568 (VrOffset + RegNum * 16) > AArch64VrEndOffset)
7572 case AK_GeneralPurpose:
7573 Base = getShadowPtrForVAArgument(IRB, GrOffset);
7574 GrOffset += 8 * RegNum;
7576 case AK_FloatingPoint:
7577 Base = getShadowPtrForVAArgument(IRB, VrOffset);
7578 VrOffset += 16 * RegNum;
7585 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
7586 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7587 unsigned BaseOffset = OverflowOffset;
7588 Base = getShadowPtrForVAArgument(IRB, BaseOffset);
7589 OverflowOffset += AlignedSize;
7592 CleanUnusedTLS(IRB,
Base, BaseOffset);
7604 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
7605 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
7610 Value *SaveAreaPtrPtr =
7611 IRB.
CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
7612 return IRB.
CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
7617 Value *SaveAreaPtr =
7618 IRB.
CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
7620 return IRB.
CreateSExt(SaveArea32, MS.IntptrTy);
7623 void finalizeInstrumentation()
override {
7624 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
7625 "finalizeInstrumentation called twice");
7626 if (!VAStartInstrumentationList.empty()) {
7633 ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
7634 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7640 Intrinsic::umin, CopySize,
7646 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
7647 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
7651 for (CallInst *OrigInst : VAStartInstrumentationList) {
7652 NextNodeIRBuilder IRB(OrigInst);
7654 Value *VAListTag = OrigInst->getArgOperand(0);
7671 Value *StackSaveAreaPtr =
7672 IRB.
CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
7675 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
7676 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
7679 IRB.
CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
7682 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
7683 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
7686 IRB.
CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
7692 Value *GrRegSaveAreaShadowPtrOff =
7693 IRB.
CreateAdd(GrArgSize, GrOffSaveArea);
7695 Value *GrRegSaveAreaShadowPtr =
7696 MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
7702 Value *GrCopySize = IRB.
CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
7708 Value *VrRegSaveAreaShadowPtrOff =
7709 IRB.
CreateAdd(VrArgSize, VrOffSaveArea);
7711 Value *VrRegSaveAreaShadowPtr =
7712 MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
7719 VrRegSaveAreaShadowPtrOff);
7720 Value *VrCopySize = IRB.
CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
7726 Value *StackSaveAreaShadowPtr =
7727 MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.
getInt8Ty(),
7732 VAArgTLSCopy, IRB.
getInt32(AArch64VAEndOffset));
7735 Align(16), VAArgOverflowSize);
7741struct VarArgPowerPC64Helper :
public VarArgHelperBase {
7742 AllocaInst *VAArgTLSCopy =
nullptr;
7743 Value *VAArgSize =
nullptr;
7745 VarArgPowerPC64Helper(Function &
F, MemorySanitizer &MS,
7746 MemorySanitizerVisitor &MSV)
7747 : VarArgHelperBase(
F, MS, MSV, 8) {}
7749 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7757 Triple TargetTriple(
F.getParent()->getTargetTriple());
7761 if (TargetTriple.isPPC64ELFv2ABI())
7765 unsigned VAArgOffset = VAArgBase;
7766 const DataLayout &
DL =
F.getDataLayout();
7769 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
7771 assert(
A->getType()->isPointerTy());
7773 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
7776 ArgAlign =
Align(8);
7777 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
7780 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7782 Value *AShadowPtr, *AOriginPtr;
7783 std::tie(AShadowPtr, AOriginPtr) =
7784 MSV.getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(),
7794 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
7796 if (
A->getType()->isArrayTy()) {
7799 Type *ElementTy =
A->getType()->getArrayElementType();
7801 ArgAlign =
Align(
DL.getTypeAllocSize(ElementTy));
7802 }
else if (
A->getType()->isVectorTy()) {
7804 ArgAlign =
Align(ArgSize);
7807 ArgAlign =
Align(8);
7808 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
7809 if (
DL.isBigEndian()) {
7813 VAArgOffset += (8 - ArgSize);
7817 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7821 VAArgOffset += ArgSize;
7825 VAArgBase = VAArgOffset;
7829 ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
7832 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
7835 void finalizeInstrumentation()
override {
7836 assert(!VAArgSize && !VAArgTLSCopy &&
7837 "finalizeInstrumentation called twice");
7840 Value *CopySize = VAArgSize;
7842 if (!VAStartInstrumentationList.empty()) {
7846 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7852 Intrinsic::umin, CopySize,
7860 for (CallInst *OrigInst : VAStartInstrumentationList) {
7861 NextNodeIRBuilder IRB(OrigInst);
7862 Value *VAListTag = OrigInst->getArgOperand(0);
7865 RegSaveAreaPtrPtr = IRB.
CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
7868 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7869 const DataLayout &
DL =
F.getDataLayout();
7870 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
7872 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7873 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
7875 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
7882struct VarArgPowerPC32Helper :
public VarArgHelperBase {
7883 AllocaInst *VAArgTLSCopy =
nullptr;
7884 Value *VAArgSize =
nullptr;
7886 VarArgPowerPC32Helper(Function &
F, MemorySanitizer &MS,
7887 MemorySanitizerVisitor &MSV)
7888 : VarArgHelperBase(
F, MS, MSV, 12) {}
7890 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7894 unsigned VAArgOffset = VAArgBase;
7895 const DataLayout &
DL =
F.getDataLayout();
7896 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
7899 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
7901 assert(
A->getType()->isPointerTy());
7903 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
7905 if (ArgAlign < IntptrSize)
7906 ArgAlign =
Align(IntptrSize);
7907 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
7910 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7912 Value *AShadowPtr, *AOriginPtr;
7913 std::tie(AShadowPtr, AOriginPtr) =
7914 MSV.getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(),
7924 Type *ArgTy =
A->getType();
7930 uint64_t ArgSize =
DL.getTypeAllocSize(ArgTy);
7937 ArgAlign =
Align(
DL.getTypeAllocSize(ElementTy));
7940 ArgAlign =
Align(ArgSize);
7942 if (ArgAlign < IntptrSize)
7943 ArgAlign =
Align(IntptrSize);
7944 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
7945 if (
DL.isBigEndian()) {
7948 if (ArgSize < IntptrSize)
7949 VAArgOffset += (IntptrSize - ArgSize);
7952 Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
7958 VAArgOffset += ArgSize;
7965 ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
7968 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
7971 void finalizeInstrumentation()
override {
7972 assert(!VAArgSize && !VAArgTLSCopy &&
7973 "finalizeInstrumentation called twice");
7975 VAArgSize = IRB.
CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
7976 Value *CopySize = VAArgSize;
7978 if (!VAStartInstrumentationList.empty()) {
7982 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7988 Intrinsic::umin, CopySize,
7996 for (CallInst *OrigInst : VAStartInstrumentationList) {
7997 NextNodeIRBuilder IRB(OrigInst);
7998 Value *VAListTag = OrigInst->getArgOperand(0);
8000 Value *RegSaveAreaSize = CopySize;
8004 IRB.
CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
8008 Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
8010 RegSaveAreaPtrPtr = IRB.
CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
8013 const DataLayout &
DL =
F.getDataLayout();
8014 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
8018 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8019 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8020 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
8022 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
8023 Alignment, RegSaveAreaSize);
8025 RegSaveAreaShadowPtr =
8028 ConstantInt::get(MS.IntptrTy, 32));
8033 ConstantInt::get(MS.IntptrTy, 32), Alignment);
8038 Value *OverflowAreaSize = IRB.
CreateSub(CopySize, RegSaveAreaSize);
8041 OverflowAreaPtrPtr =
8042 IRB.
CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
8043 OverflowAreaPtrPtr = IRB.
CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
8045 Value *OverflowAreaPtr = IRB.
CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
8047 Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
8048 std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
8049 MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.
getInt8Ty(),
8052 Value *OverflowVAArgTLSCopyPtr =
8054 OverflowVAArgTLSCopyPtr =
8055 IRB.
CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
8057 OverflowVAArgTLSCopyPtr =
8060 OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
8067struct VarArgSystemZHelper :
public VarArgHelperBase {
8068 static const unsigned SystemZGpOffset = 16;
8069 static const unsigned SystemZGpEndOffset = 56;
8070 static const unsigned SystemZFpOffset = 128;
8071 static const unsigned SystemZFpEndOffset = 160;
8072 static const unsigned SystemZMaxVrArgs = 8;
8073 static const unsigned SystemZRegSaveAreaSize = 160;
8074 static const unsigned SystemZOverflowOffset = 160;
8075 static const unsigned SystemZVAListTagSize = 32;
8076 static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
8077 static const unsigned SystemZRegSaveAreaPtrOffset = 24;
8079 bool IsSoftFloatABI;
8080 AllocaInst *VAArgTLSCopy =
nullptr;
8081 AllocaInst *VAArgTLSOriginCopy =
nullptr;
8082 Value *VAArgOverflowSize =
nullptr;
8084 enum class ArgKind {
8092 enum class ShadowExtension {
None,
Zero, Sign };
8094 VarArgSystemZHelper(Function &
F, MemorySanitizer &MS,
8095 MemorySanitizerVisitor &MSV)
8096 : VarArgHelperBase(
F, MS, MSV, SystemZVAListTagSize),
8097 IsSoftFloatABI(
F.getFnAttribute(
"use-soft-float").getValueAsBool()) {}
8099 ArgKind classifyArgument(
Type *
T) {
8106 if (
T->isIntegerTy(128) ||
T->isFP128Ty())
8107 return ArgKind::Indirect;
8108 if (
T->isFloatingPointTy())
8109 return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
8110 if (
T->isIntegerTy() ||
T->isPointerTy())
8111 return ArgKind::GeneralPurpose;
8112 if (
T->isVectorTy())
8113 return ArgKind::Vector;
8114 return ArgKind::Memory;
8117 ShadowExtension getShadowExtension(
const CallBase &CB,
unsigned ArgNo) {
8127 return ShadowExtension::Zero;
8131 return ShadowExtension::Sign;
8133 return ShadowExtension::None;
8136 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
8137 unsigned GpOffset = SystemZGpOffset;
8138 unsigned FpOffset = SystemZFpOffset;
8139 unsigned VrIndex = 0;
8140 unsigned OverflowOffset = SystemZOverflowOffset;
8141 const DataLayout &
DL =
F.getDataLayout();
8147 ArgKind AK = classifyArgument(
T);
8148 if (AK == ArgKind::Indirect) {
8150 AK = ArgKind::GeneralPurpose;
8152 if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
8153 AK = ArgKind::Memory;
8154 if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
8155 AK = ArgKind::Memory;
8156 if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
8157 AK = ArgKind::Memory;
8158 Value *ShadowBase =
nullptr;
8159 Value *OriginBase =
nullptr;
8160 ShadowExtension SE = ShadowExtension::None;
8162 case ArgKind::GeneralPurpose: {
8164 uint64_t ArgSize = 8;
8167 SE = getShadowExtension(CB, ArgNo);
8168 uint64_t GapSize = 0;
8169 if (SE == ShadowExtension::None) {
8170 uint64_t ArgAllocSize =
DL.getTypeAllocSize(
T);
8171 assert(ArgAllocSize <= ArgSize);
8172 GapSize = ArgSize - ArgAllocSize;
8174 ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
8175 if (MS.TrackOrigins)
8176 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
8178 GpOffset += ArgSize;
8184 case ArgKind::FloatingPoint: {
8186 uint64_t ArgSize = 8;
8193 ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
8194 if (MS.TrackOrigins)
8195 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
8197 FpOffset += ArgSize;
8203 case ArgKind::Vector: {
8210 case ArgKind::Memory: {
8215 uint64_t ArgAllocSize =
DL.getTypeAllocSize(
T);
8216 uint64_t ArgSize =
alignTo(ArgAllocSize, 8);
8218 SE = getShadowExtension(CB, ArgNo);
8220 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
8222 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
8223 if (MS.TrackOrigins)
8225 getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
8226 OverflowOffset += ArgSize;
8233 case ArgKind::Indirect:
8236 if (ShadowBase ==
nullptr)
8238 Value *Shadow = MSV.getShadow(
A);
8239 if (SE != ShadowExtension::None)
8240 Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.
getInt64Ty(),
8241 SE == ShadowExtension::Sign);
8242 ShadowBase = IRB.
CreateIntToPtr(ShadowBase, MS.PtrTy,
"_msarg_va_s");
8244 if (MS.TrackOrigins) {
8245 Value *Origin = MSV.getOrigin(
A);
8246 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
8247 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
8251 Constant *OverflowSize = ConstantInt::get(
8252 IRB.
getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
8253 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
8260 ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
8263 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8265 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8266 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(), Alignment,
8271 unsigned RegSaveAreaSize =
8272 IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
8273 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
8275 if (MS.TrackOrigins)
8276 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
8277 Alignment, RegSaveAreaSize);
8286 ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
8288 Value *OverflowArgAreaPtr = IRB.
CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
8289 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
8291 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
8292 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
8295 SystemZOverflowOffset);
8296 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
8298 if (MS.TrackOrigins) {
8300 SystemZOverflowOffset);
8301 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
8306 void finalizeInstrumentation()
override {
8307 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
8308 "finalizeInstrumentation called twice");
8309 if (!VAStartInstrumentationList.empty()) {
8316 IRB.
CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
8318 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8324 Intrinsic::umin, CopySize,
8328 if (MS.TrackOrigins) {
8329 VAArgTLSOriginCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8338 for (CallInst *OrigInst : VAStartInstrumentationList) {
8339 NextNodeIRBuilder IRB(OrigInst);
8340 Value *VAListTag = OrigInst->getArgOperand(0);
8341 copyRegSaveArea(IRB, VAListTag);
8342 copyOverflowArea(IRB, VAListTag);
8348struct VarArgI386Helper :
public VarArgHelperBase {
8349 AllocaInst *VAArgTLSCopy =
nullptr;
8350 Value *VAArgSize =
nullptr;
8352 VarArgI386Helper(Function &
F, MemorySanitizer &MS,
8353 MemorySanitizerVisitor &MSV)
8354 : VarArgHelperBase(
F, MS, MSV, 4) {}
8356 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
8357 const DataLayout &
DL =
F.getDataLayout();
8358 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
8359 unsigned VAArgOffset = 0;
8362 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
8364 assert(
A->getType()->isPointerTy());
8366 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
8368 if (ArgAlign < IntptrSize)
8369 ArgAlign =
Align(IntptrSize);
8370 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
8372 Value *
Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
8374 Value *AShadowPtr, *AOriginPtr;
8375 std::tie(AShadowPtr, AOriginPtr) =
8376 MSV.getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(),
8386 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
8388 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
8389 if (
DL.isBigEndian()) {
8392 if (ArgSize < IntptrSize)
8393 VAArgOffset += (IntptrSize - ArgSize);
8396 Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
8399 VAArgOffset += ArgSize;
8405 Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
8408 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
8411 void finalizeInstrumentation()
override {
8412 assert(!VAArgSize && !VAArgTLSCopy &&
8413 "finalizeInstrumentation called twice");
8415 VAArgSize = IRB.
CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
8416 Value *CopySize = VAArgSize;
8418 if (!VAStartInstrumentationList.empty()) {
8421 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8427 Intrinsic::umin, CopySize,
8435 for (CallInst *OrigInst : VAStartInstrumentationList) {
8436 NextNodeIRBuilder IRB(OrigInst);
8437 Value *VAListTag = OrigInst->getArgOperand(0);
8438 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
8439 Value *RegSaveAreaPtrPtr =
8441 PointerType::get(*MS.C, 0));
8442 Value *RegSaveAreaPtr =
8443 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
8444 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8445 const DataLayout &
DL =
F.getDataLayout();
8446 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
8448 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8449 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
8451 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
8459struct VarArgGenericHelper :
public VarArgHelperBase {
8460 AllocaInst *VAArgTLSCopy =
nullptr;
8461 Value *VAArgSize =
nullptr;
8463 VarArgGenericHelper(Function &
F, MemorySanitizer &MS,
8464 MemorySanitizerVisitor &MSV,
const unsigned VAListTagSize)
8465 : VarArgHelperBase(
F, MS, MSV, VAListTagSize) {}
8467 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
8468 unsigned VAArgOffset = 0;
8469 const DataLayout &
DL =
F.getDataLayout();
8470 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
8475 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
8476 if (
DL.isBigEndian()) {
8479 if (ArgSize < IntptrSize)
8480 VAArgOffset += (IntptrSize - ArgSize);
8482 Value *
Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
8483 VAArgOffset += ArgSize;
8484 VAArgOffset =
alignTo(VAArgOffset, IntptrSize);
8490 Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
8493 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
8496 void finalizeInstrumentation()
override {
8497 assert(!VAArgSize && !VAArgTLSCopy &&
8498 "finalizeInstrumentation called twice");
8500 VAArgSize = IRB.
CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
8501 Value *CopySize = VAArgSize;
8503 if (!VAStartInstrumentationList.empty()) {
8506 VAArgTLSCopy = IRB.
CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8512 Intrinsic::umin, CopySize,
8520 for (CallInst *OrigInst : VAStartInstrumentationList) {
8521 NextNodeIRBuilder IRB(OrigInst);
8522 Value *VAListTag = OrigInst->getArgOperand(0);
8523 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
8524 Value *RegSaveAreaPtrPtr =
8526 PointerType::get(*MS.C, 0));
8527 Value *RegSaveAreaPtr =
8528 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
8529 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8530 const DataLayout &
DL =
F.getDataLayout();
8531 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
8533 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8534 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
8536 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
8544using VarArgARM32Helper = VarArgGenericHelper;
8545using VarArgRISCVHelper = VarArgGenericHelper;
8546using VarArgMIPSHelper = VarArgGenericHelper;
8547using VarArgLoongArch64Helper = VarArgGenericHelper;
8550struct VarArgNoOpHelper :
public VarArgHelper {
8551 VarArgNoOpHelper(Function &
F, MemorySanitizer &MS,
8552 MemorySanitizerVisitor &MSV) {}
8554 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {}
8556 void visitVAStartInst(VAStartInst &
I)
override {}
8558 void visitVACopyInst(VACopyInst &
I)
override {}
8560 void finalizeInstrumentation()
override {}
8566 MemorySanitizerVisitor &Visitor) {
8569 Triple TargetTriple(Func.getParent()->getTargetTriple());
8572 return new VarArgI386Helper(Func, Msan, Visitor);
8575 return new VarArgAMD64Helper(Func, Msan, Visitor);
8577 if (TargetTriple.
isARM())
8578 return new VarArgARM32Helper(Func, Msan, Visitor, 4);
8581 return new VarArgAArch64Helper(Func, Msan, Visitor);
8584 return new VarArgSystemZHelper(Func, Msan, Visitor);
8589 return new VarArgPowerPC32Helper(Func, Msan, Visitor);
8592 return new VarArgPowerPC64Helper(Func, Msan, Visitor);
8595 return new VarArgRISCVHelper(Func, Msan, Visitor, 4);
8598 return new VarArgRISCVHelper(Func, Msan, Visitor, 8);
8601 return new VarArgMIPSHelper(Func, Msan, Visitor, 4);
8604 return new VarArgMIPSHelper(Func, Msan, Visitor, 8);
8607 return new VarArgLoongArch64Helper(Func, Msan, Visitor,
8610 return new VarArgNoOpHelper(Func, Msan, Visitor);
8617 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
8620 MemorySanitizerVisitor Visitor(
F, *
this, TLI);
8627 return Visitor.runOnFunction();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isStore(int Opcode)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const MemoryMapParams Linux_LoongArch64_MemoryMapParams
const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< int > ClTrackOrigins("dfsan-track-origins", cl::desc("Track origins of labels"), cl::Hidden, cl::init(0))
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
const MemoryMapParams Linux_AArch64_MemoryMapParams
static bool isAMustTailRetVal(Value *RetVal)
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Module.h This file contains the declarations for the Module class.
Machine Check Debug Module
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams
static const Align kMinOriginAlignment
static cl::opt< uint64_t > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("Poison fully undef temporary values. " "Partially undefined constant vectors " "are unaffected by this flag (see " "-msan-poison-undef-vectors)."), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
static cl::opt< uint64_t > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
static const unsigned kOriginSize
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
static const Align kShadowTLSAlignment
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics i.e.," "check that all the inputs are fully initialized, and mark " "the output as fully initialized. These semantics are applied " "to instructions that could not be handled explicitly nor " "heuristically."), cl::Hidden, cl::init(false))
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
static cl::opt< bool > ClPreciseDisjointOr("msan-precise-disjoint-or", cl::desc("Precisely poison disjoint OR. If false (legacy behavior), " "disjointedness is ignored (i.e., 1|1 is initialized)."), cl::Hidden, cl::init(false))
static const MemoryMapParams Linux_S390X_MemoryMapParams
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_I386_MemoryMapParams
const char kMsanInitName[]
static cl::opt< bool > ClPoisonUndefVectors("msan-poison-undef-vectors", cl::desc("Precisely poison partially undefined constant vectors. " "If false (legacy behavior), the entire vector is " "considered fully initialized, which may lead to false " "negatives. Fully undefined constant vectors are " "unaffected by this flag (see -msan-poison-undef)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPrintStackNames("msan-print-stack-names", cl::desc("Print name of local stack variable"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleLifetimeIntrinsics("msan-handle-lifetime-intrinsics", cl::desc("when possible, poison scoped variables at the beginning of the scope " "(slower, but more precise)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
static GlobalVariable * createPrivateConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClEagerChecks("msan-eager-checks", cl::desc("check arguments and return values at function call boundaries"), cl::Hidden, cl::init(false))
static cl::opt< int > ClDisambiguateWarning("msan-disambiguate-warning-threshold", cl::desc("Define threshold for number of checks per " "debug location to force origin update."), cl::Hidden, cl::init(3))
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
static cl::opt< uint64_t > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams
static const unsigned kParamTLSSize
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
static cl::opt< bool > ClDumpHeuristicInstructions("msan-dump-heuristic-instructions", cl::desc("Prints 'unknown' instructions that were handled heuristically. " "Use -msan-dump-strict-instructions to print instructions that " "could not be handled explicitly nor heuristically."), cl::Hidden, cl::init(false))
static const unsigned kRetvalTLSSize
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams
const char kMsanModuleCtorName[]
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDisableChecks("msan-disable-checks", cl::desc("Apply no_sanitize to the whole file"), cl::Hidden, cl::init(false))
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
void setAlignment(Align Align)
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
const T & front() const
front - Get the first element.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
This class stores enough information to efficiently remove some attributes from an existing AttrBuild...
AttributeMask & addAttribute(Attribute::AttrKind Val)
Add an attribute to the mask.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
void removeFnAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the function.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_SGT
signed greater than
@ ICMP_SGE
signed greater or equal
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
static LLVM_ABI Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isZeroValue() const
Return true if the value is negative zero or null value.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static bool shouldExecute(unsigned CounterName)
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
static FixedVectorType * getHalfElementsVectorType(FixedVectorType *VTy)
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
LLVM_ABI void setComdat(Comdat *C)
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ ExternalLinkage
Externally visible function.
Analysis pass providing a never-invalidated alias analysis result.
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LLVM_ABI CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
LLVM_ABI CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
LLVM_ABI CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
LLVM_ABI CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
LLVM_ABI CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LLVM_ABI CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
LLVM_ABI CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
std::vector< ConstraintInfo > ConstraintInfoVector
void visit(Iterator Start, Iterator End)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
bool remove(const value_type &X)
Remove an item from the set vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Triple - Helper class for working with autoconf configuration names.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
bool isRISCV32() const
Tests whether the target is 32-bit RISC-V.
bool isPPC32() const
Tests whether the target is 32-bit PowerPC (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isPPC64() const
Tests whether the target is 64-bit PowerPC (little and big endian).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
bool isSystemZ() const
Tests whether the target is SystemZ.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Type * getElementType() const
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
initializer< Ty > init(const Ty &Val)
Function * Kernel
Summary of a kernel (=entry point for target offloading).
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
LLVM_ABI std::pair< Instruction *, Value * > SplitBlockAndInsertSimpleForLoop(Value *End, BasicBlock::iterator SplitBefore)
Insert a for (int i = 0; i < End; i++) loop structure (with the exception that End is assumed > 0,...
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Or
Bitwise or logical OR of integers.
@ And
Bitwise or logical AND of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
RoundingMode
Rounding mode.
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
LLVM_ABI bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
std::string itostr(int64_t X)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A CRTP mix-in to automatically provide informational APIs needed for passes.