184#include "llvm/IR/IntrinsicsAArch64.h"
185#include "llvm/IR/IntrinsicsX86.h"
216#define DEBUG_TYPE "msan"
219 "Controls which checks to insert");
222 "Controls which instruction to instrument");
241 "msan-track-origins",
246 cl::desc(
"keep going after reporting a UMR"),
255 "msan-poison-stack-with-call",
260 "msan-poison-stack-pattern",
261 cl::desc(
"poison uninitialized stack variables with the given pattern"),
266 cl::desc(
"Print name of local stack variable"),
271 cl::desc(
"Poison fully undef temporary values. "
272 "Partially undefined constant vectors "
273 "are unaffected by this flag (see "
274 "-msan-poison-undef-vectors)."),
278 "msan-poison-undef-vectors",
279 cl::desc(
"Precisely poison partially undefined constant vectors. "
280 "If false (legacy behavior), the entire vector is "
281 "considered fully initialized, which may lead to false "
282 "negatives. Fully undefined constant vectors are "
283 "unaffected by this flag (see -msan-poison-undef)."),
287 "msan-precise-disjoint-or",
288 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
289 "disjointedness is ignored (i.e., 1|1 is initialized)."),
294 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
299 cl::desc(
"exact handling of relational integer ICmp"),
303 "msan-handle-lifetime-intrinsics",
305 "when possible, poison scoped variables at the beginning of the scope "
306 "(slower, but more precise)"),
317 "msan-handle-asm-conservative",
328 "msan-check-access-address",
329 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
334 cl::desc(
"check arguments and return values at function call boundaries"),
338 "msan-dump-strict-instructions",
339 cl::desc(
"print out instructions with default strict semantics i.e.,"
340 "check that all the inputs are fully initialized, and mark "
341 "the output as fully initialized. These semantics are applied "
342 "to instructions that could not be handled explicitly nor "
351 "msan-dump-heuristic-instructions",
352 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
353 "Use -msan-dump-strict-instructions to print instructions that "
354 "could not be handled explicitly nor heuristically."),
358 "msan-instrumentation-with-call-threshold",
360 "If the function being instrumented requires more than "
361 "this number of checks and origin stores, use callbacks instead of "
362 "inline checks (-1 means never use callbacks)."),
367 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
377 cl::desc(
"Insert checks for constant shadow values"),
384 cl::desc(
"Place MSan constructors in comdat sections"),
390 cl::desc(
"Define custom MSan AndMask"),
394 cl::desc(
"Define custom MSan XorMask"),
398 cl::desc(
"Define custom MSan ShadowBase"),
402 cl::desc(
"Define custom MSan OriginBase"),
407 cl::desc(
"Define threshold for number of checks per "
408 "debug location to force origin update."),
420struct MemoryMapParams {
427struct PlatformMemoryMapParams {
428 const MemoryMapParams *bits32;
429 const MemoryMapParams *bits64;
591class MemorySanitizer {
600 MemorySanitizer(MemorySanitizer &&) =
delete;
601 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
602 MemorySanitizer(
const MemorySanitizer &) =
delete;
603 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
605 bool sanitizeFunction(Function &
F, TargetLibraryInfo &TLI);
608 friend struct MemorySanitizerVisitor;
609 friend struct VarArgHelperBase;
610 friend struct VarArgAMD64Helper;
611 friend struct VarArgAArch64Helper;
612 friend struct VarArgPowerPC64Helper;
613 friend struct VarArgPowerPC32Helper;
614 friend struct VarArgSystemZHelper;
615 friend struct VarArgI386Helper;
616 friend struct VarArgGenericHelper;
618 void initializeModule(
Module &M);
619 void initializeCallbacks(
Module &M,
const TargetLibraryInfo &TLI);
620 void createKernelApi(
Module &M,
const TargetLibraryInfo &TLI);
621 void createUserspaceApi(
Module &M,
const TargetLibraryInfo &TLI);
623 template <
typename... ArgsTy>
624 FunctionCallee getOrInsertMsanMetadataFunction(
Module &M, StringRef Name,
650 Value *ParamOriginTLS;
656 Value *RetvalOriginTLS;
662 Value *VAArgOriginTLS;
665 Value *VAArgOverflowSizeTLS;
668 bool CallbacksInitialized =
false;
671 FunctionCallee WarningFn;
675 FunctionCallee MaybeWarningVarSizeFn;
680 FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
682 FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
685 FunctionCallee MsanPoisonStackFn;
689 FunctionCallee MsanChainOriginFn;
692 FunctionCallee MsanSetOriginFn;
695 FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
698 StructType *MsanContextStateTy;
699 FunctionCallee MsanGetContextStateFn;
702 FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
708 FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
709 FunctionCallee MsanMetadataPtrForLoad_1_8[4];
710 FunctionCallee MsanMetadataPtrForStore_1_8[4];
711 FunctionCallee MsanInstrumentAsmStoreFn;
714 Value *MsanMetadataAlloca;
717 FunctionCallee getKmsanShadowOriginAccessFn(
bool isStore,
int size);
720 const MemoryMapParams *MapParams;
724 MemoryMapParams CustomMapParams;
726 MDNode *ColdCallWeights;
729 MDNode *OriginStoreWeights;
732void insertModuleCtor(
Module &M) {
769 if (!Options.Kernel) {
778 MemorySanitizer Msan(*
F.getParent(), Options);
797 OS, MapClassName2PassName);
803 if (Options.EagerChecks)
804 OS <<
"eager-checks;";
805 OS <<
"track-origins=" << Options.TrackOrigins;
821template <
typename... ArgsTy>
823MemorySanitizer::getOrInsertMsanMetadataFunction(
Module &M,
StringRef Name,
828 std::forward<ArgsTy>(Args)...);
831 return M.getOrInsertFunction(Name, MsanMetadata,
832 std::forward<ArgsTy>(Args)...);
841 RetvalOriginTLS =
nullptr;
843 ParamOriginTLS =
nullptr;
845 VAArgOriginTLS =
nullptr;
846 VAArgOverflowSizeTLS =
nullptr;
848 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
850 IRB.getVoidTy(), IRB.getInt32Ty());
861 MsanGetContextStateFn =
862 M.getOrInsertFunction(
"__msan_get_context_state", PtrTy);
866 for (
int ind = 0,
size = 1; ind < 4; ind++,
size <<= 1) {
867 std::string name_load =
868 "__msan_metadata_ptr_for_load_" + std::to_string(
size);
869 std::string name_store =
870 "__msan_metadata_ptr_for_store_" + std::to_string(
size);
871 MsanMetadataPtrForLoad_1_8[ind] =
872 getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
873 MsanMetadataPtrForStore_1_8[ind] =
874 getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
877 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
878 M,
"__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
879 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
880 M,
"__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);
883 MsanPoisonAllocaFn =
M.getOrInsertFunction(
884 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
885 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
886 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
890 return M.getOrInsertGlobal(Name, Ty, [&] {
892 nullptr, Name,
nullptr,
898void MemorySanitizer::createUserspaceApi(
Module &M,
906 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
907 :
"__msan_warning_with_origin_noreturn";
908 WarningFn =
M.getOrInsertFunction(WarningFnName,
910 IRB.getVoidTy(), IRB.getInt32Ty());
913 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
914 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
941 IRB.getIntPtrTy(
M.getDataLayout()));
945 unsigned AccessSize = 1 << AccessSizeIndex;
946 std::string FunctionName =
"__msan_maybe_warning_" +
itostr(AccessSize);
947 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
949 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
950 MaybeWarningVarSizeFn =
M.getOrInsertFunction(
951 "__msan_maybe_warning_N", TLI.
getAttrList(
C, {},
false),
952 IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
953 FunctionName =
"__msan_maybe_store_origin_" +
itostr(AccessSize);
954 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
956 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
960 MsanSetAllocaOriginWithDescriptionFn =
961 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
962 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
963 MsanSetAllocaOriginNoDescriptionFn =
964 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
965 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
966 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
967 IRB.getVoidTy(), PtrTy, IntptrTy);
971void MemorySanitizer::initializeCallbacks(
Module &M,
974 if (CallbacksInitialized)
980 MsanChainOriginFn =
M.getOrInsertFunction(
981 "__msan_chain_origin",
984 MsanSetOriginFn =
M.getOrInsertFunction(
986 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
988 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
990 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
991 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
993 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
995 MsanInstrumentAsmStoreFn =
M.getOrInsertFunction(
996 "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
999 createKernelApi(M, TLI);
1001 createUserspaceApi(M, TLI);
1003 CallbacksInitialized =
true;
1009 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
1027void MemorySanitizer::initializeModule(
Module &M) {
1028 auto &
DL =
M.getDataLayout();
1030 TargetTriple =
M.getTargetTriple();
1032 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
1033 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
1035 if (ShadowPassed || OriginPassed) {
1040 MapParams = &CustomMapParams;
1042 switch (TargetTriple.getOS()) {
1044 switch (TargetTriple.getArch()) {
1059 switch (TargetTriple.getArch()) {
1068 switch (TargetTriple.getArch()) {
1102 C = &(
M.getContext());
1104 IntptrTy = IRB.getIntPtrTy(
DL);
1105 OriginTy = IRB.getInt32Ty();
1106 PtrTy = IRB.getPtrTy();
1111 if (!CompileKernel) {
1113 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1114 return new GlobalVariable(
1115 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1116 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1120 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1121 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1122 GlobalValue::WeakODRLinkage,
1123 IRB.getInt32(Recover),
"__msan_keep_going");
1138struct VarArgHelper {
1139 virtual ~VarArgHelper() =
default;
1142 virtual void visitCallBase(CallBase &CB,
IRBuilder<> &IRB) = 0;
1145 virtual void visitVAStartInst(VAStartInst &
I) = 0;
1148 virtual void visitVACopyInst(VACopyInst &
I) = 0;
1154 virtual void finalizeInstrumentation() = 0;
1157struct MemorySanitizerVisitor;
1162 MemorySanitizerVisitor &Visitor);
1169 if (TypeSizeFixed <= 8)
1178class NextNodeIRBuilder :
public IRBuilder<> {
1191struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1193 MemorySanitizer &MS;
1195 ValueMap<Value *, Value *> ShadowMap, OriginMap;
1196 std::unique_ptr<VarArgHelper> VAHelper;
1197 const TargetLibraryInfo *TLI;
1204 bool PropagateShadow;
1207 bool PoisonUndefVectors;
1209 struct ShadowOriginAndInsertPoint {
1214 ShadowOriginAndInsertPoint(
Value *S,
Value *O, Instruction *
I)
1215 : Shadow(S), Origin(
O), OrigIns(
I) {}
1218 DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
1219 SmallSetVector<AllocaInst *, 16> AllocaSet;
1222 int64_t SplittableBlocksCount = 0;
1224 MemorySanitizerVisitor(Function &
F, MemorySanitizer &MS,
1225 const TargetLibraryInfo &TLI)
1227 bool SanitizeFunction =
1229 InsertChecks = SanitizeFunction;
1230 PropagateShadow = SanitizeFunction;
1241 MS.initializeCallbacks(*
F.getParent(), TLI);
1243 IRBuilder<>(&
F.getEntryBlock(),
F.getEntryBlock().getFirstNonPHIIt())
1244 .CreateIntrinsic(Intrinsic::donothing, {});
1246 if (MS.CompileKernel) {
1248 insertKmsanPrologue(IRB);
1252 <<
"MemorySanitizer is not inserting checks into '"
1253 <<
F.getName() <<
"'\n");
1256 bool instrumentWithCalls(
Value *V) {
1260 ++SplittableBlocksCount;
1265 bool isInPrologue(Instruction &
I) {
1266 return I.getParent() == FnPrologueEnd->
getParent() &&
1275 if (MS.TrackOrigins <= 1)
1277 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1281 const DataLayout &
DL =
F.getDataLayout();
1282 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1292 TypeSize TS, Align Alignment) {
1293 const DataLayout &
DL =
F.getDataLayout();
1294 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1295 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1307 auto [InsertPt,
Index] =
1319 Align CurrentAlignment = Alignment;
1320 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1321 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1323 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1328 CurrentAlignment = IntptrAlignment;
1341 Value *OriginPtr, Align Alignment) {
1342 const DataLayout &
DL =
F.getDataLayout();
1344 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
1346 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1355 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1362 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1364 if (instrumentWithCalls(ConvertedShadow) &&
1366 FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1367 Value *ConvertedShadow2 =
1369 CallBase *CB = IRB.
CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
1373 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1377 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1382 void materializeStores() {
1383 for (StoreInst *SI : StoreList) {
1385 Value *Val =
SI->getValueOperand();
1386 Value *Addr =
SI->getPointerOperand();
1387 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1388 Value *ShadowPtr, *OriginPtr;
1390 const Align Alignment =
SI->getAlign();
1392 std::tie(ShadowPtr, OriginPtr) =
1393 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
true);
1395 [[maybe_unused]] StoreInst *NewSI =
1402 if (MS.TrackOrigins && !
SI->isAtomic())
1403 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1410 if (MS.TrackOrigins < 2)
1413 if (LazyWarningDebugLocationCount.
empty())
1414 for (
const auto &
I : InstrumentationList)
1415 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1431 auto NewDebugLoc = OI->getDebugLoc();
1438 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1439 Origin = updateOrigin(Origin, IRBOrigin);
1444 if (MS.CompileKernel || MS.TrackOrigins)
1455 const DataLayout &
DL =
F.getDataLayout();
1456 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1458 if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
1460 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1461 Value *ConvertedShadow2 =
1465 FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1469 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1473 FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
1476 unsigned ShadowSize =
DL.getTypeAllocSize(ConvertedShadow2->
getType());
1479 {ShadowAlloca, ConstantInt::get(IRB.
getInt64Ty(), ShadowSize),
1480 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1485 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1488 !MS.Recover, MS.ColdCallWeights);
1491 insertWarningFn(IRB, Origin);
1496 void materializeInstructionChecks(
1498 const DataLayout &
DL =
F.getDataLayout();
1501 bool Combine = !MS.TrackOrigins;
1503 Value *Shadow =
nullptr;
1504 for (
const auto &ShadowData : InstructionChecks) {
1505 assert(ShadowData.OrigIns == Instruction);
1508 Value *ConvertedShadow = ShadowData.Shadow;
1517 insertWarningFn(IRB, ShadowData.Origin);
1527 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1532 Shadow = ConvertedShadow;
1536 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1537 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1538 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1544 materializeOneCheck(IRB, Shadow,
nullptr);
1548 static bool isAArch64SVCount(
Type *Ty) {
1550 return TTy->
getName() ==
"aarch64.svcount";
1556 static bool isScalableNonVectorType(
Type *Ty) {
1557 if (!isAArch64SVCount(Ty))
1558 LLVM_DEBUG(
dbgs() <<
"isScalableNonVectorType: Unexpected type " << *Ty
1564 void materializeChecks() {
1567 SmallPtrSet<Instruction *, 16>
Done;
1570 for (
auto I = InstrumentationList.begin();
1571 I != InstrumentationList.end();) {
1572 auto OrigIns =
I->OrigIns;
1576 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1577 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1578 return OrigIns != R.OrigIns;
1592 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1593 {Zero, IRB.getInt32(0)},
"param_shadow");
1594 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1595 {Zero, IRB.getInt32(1)},
"retval_shadow");
1596 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1597 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1598 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1599 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1600 MS.VAArgOverflowSizeTLS =
1601 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1602 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1603 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1604 {Zero, IRB.getInt32(5)},
"param_origin");
1605 MS.RetvalOriginTLS =
1606 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1607 {Zero, IRB.getInt32(6)},
"retval_origin");
1609 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1622 for (Instruction *
I : Instructions)
1626 for (PHINode *PN : ShadowPHINodes) {
1628 PHINode *PNO = MS.TrackOrigins ?
cast<PHINode>(getOrigin(PN)) : nullptr;
1629 size_t NumValues = PN->getNumIncomingValues();
1630 for (
size_t v = 0;
v < NumValues;
v++) {
1631 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1633 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1637 VAHelper->finalizeInstrumentation();
1642 for (
auto Item : LifetimeStartList) {
1643 instrumentAlloca(*Item.second, Item.first);
1644 AllocaSet.
remove(Item.second);
1649 for (AllocaInst *AI : AllocaSet)
1650 instrumentAlloca(*AI);
1653 materializeChecks();
1657 materializeStores();
1663 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1674 const DataLayout &
DL =
F.getDataLayout();
1676 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1678 VT->getElementCount());
1681 return ArrayType::get(getShadowTy(AT->getElementType()),
1682 AT->getNumElements());
1686 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1687 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1689 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1692 if (isScalableNonVectorType(OrigTy)) {
1693 LLVM_DEBUG(
dbgs() <<
"getShadowTy: Scalable non-vector type: " << *OrigTy
1698 uint32_t TypeSize =
DL.getTypeSizeInBits(OrigTy);
1703 Value *collapseStructShadow(StructType *Struct,
Value *Shadow,
1708 for (
unsigned Idx = 0; Idx <
Struct->getNumElements(); Idx++) {
1711 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1713 if (Aggregator != FalseVal)
1714 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1716 Aggregator = ShadowBool;
1723 Value *collapseArrayShadow(ArrayType *Array,
Value *Shadow,
1725 if (!
Array->getNumElements())
1729 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1731 for (
unsigned Idx = 1; Idx <
Array->getNumElements(); Idx++) {
1733 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1734 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1744 return collapseStructShadow(Struct, V, IRB);
1746 return collapseArrayShadow(Array, V, IRB);
1751 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1759 Type *VTy =
V->getType();
1761 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1768 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1770 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1771 VectTy->getElementCount());
1777 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1779 return VectorType::get(
1780 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1781 VectTy->getElementCount());
1783 assert(IntPtrTy == MS.IntptrTy);
1790 VectTy->getElementCount(),
1791 constToIntPtr(VectTy->getElementType(),
C));
1793 assert(IntPtrTy == MS.IntptrTy);
1794 return ConstantInt::get(MS.IntptrTy,
C);
1807 Type *IntptrTy = ptrToIntPtrType(Addr->
getType());
1810 if (uint64_t AndMask = MS.MapParams->AndMask)
1811 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1813 if (uint64_t XorMask = MS.MapParams->XorMask)
1814 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1826 std::pair<Value *, Value *>
1828 MaybeAlign Alignment) {
1833 assert(VectTy->getElementType()->isPointerTy());
1835 Type *IntptrTy = ptrToIntPtrType(Addr->
getType());
1836 Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1837 Value *ShadowLong = ShadowOffset;
1838 if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1840 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1843 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1845 Value *OriginPtr =
nullptr;
1846 if (MS.TrackOrigins) {
1847 Value *OriginLong = ShadowOffset;
1848 uint64_t OriginBase = MS.MapParams->OriginBase;
1849 if (OriginBase != 0)
1851 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1854 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1857 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1859 return std::make_pair(ShadowPtr, OriginPtr);
1862 template <
typename... ArgsTy>
1867 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1868 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1871 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1874 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *Addr,
1878 Value *ShadowOriginPtrs;
1879 const DataLayout &
DL =
F.getDataLayout();
1880 TypeSize
Size =
DL.getTypeStoreSize(ShadowTy);
1882 FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(
isStore,
Size);
1885 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1887 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1888 ShadowOriginPtrs = createMetadataCall(
1890 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1897 return std::make_pair(ShadowPtr, OriginPtr);
1903 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *Addr,
1910 return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy,
isStore);
1915 Value *ShadowPtrs = ConstantInt::getNullValue(
1917 Value *OriginPtrs =
nullptr;
1918 if (MS.TrackOrigins)
1919 OriginPtrs = ConstantInt::getNullValue(
1921 for (
unsigned i = 0; i < NumElements; ++i) {
1924 auto [ShadowPtr, OriginPtr] =
1925 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1928 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1929 if (MS.TrackOrigins)
1931 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1933 return {ShadowPtrs, OriginPtrs};
1936 std::pair<Value *, Value *> getShadowOriginPtr(
Value *Addr,
IRBuilder<> &IRB,
1938 MaybeAlign Alignment,
1940 if (MS.CompileKernel)
1941 return getShadowOriginPtrKernel(Addr, IRB, ShadowTy,
isStore);
1942 return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1950 ConstantInt::get(MS.IntptrTy, ArgOffset),
"_msarg");
1955 if (!MS.TrackOrigins)
1958 ConstantInt::get(MS.IntptrTy, ArgOffset),
1968 Value *getOriginPtrForRetval() {
1970 return MS.RetvalOriginTLS;
1975 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1976 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1981 if (!MS.TrackOrigins)
1983 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1984 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1985 OriginMap[
V] = Origin;
1989 Type *ShadowTy = getShadowTy(OrigTy);
1999 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
2008 getPoisonedShadow(AT->getElementType()));
2013 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
2014 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
2022 Type *ShadowTy = getShadowTy(V);
2025 return getPoisonedShadow(ShadowTy);
2037 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
2038 return getCleanShadow(V);
2040 Value *Shadow = ShadowMap[
V];
2042 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
2043 assert(Shadow &&
"No shadow for a value");
2050 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
2051 : getCleanShadow(V);
2057 Value *&ShadowPtr = ShadowMap[
V];
2062 unsigned ArgOffset = 0;
2063 const DataLayout &
DL =
F->getDataLayout();
2064 for (
auto &FArg :
F->args()) {
2065 if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
2067 ?
"vscale not fully supported\n"
2068 :
"Arg is not sized\n"));
2070 ShadowPtr = getCleanShadow(V);
2071 setOrigin(
A, getCleanOrigin());
2077 unsigned Size = FArg.hasByValAttr()
2078 ?
DL.getTypeAllocSize(FArg.getParamByValType())
2079 :
DL.getTypeAllocSize(FArg.getType());
2083 if (FArg.hasByValAttr()) {
2087 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
2088 FArg.getParamAlign(), FArg.getParamByValType());
2089 Value *CpShadowPtr, *CpOriginPtr;
2090 std::tie(CpShadowPtr, CpOriginPtr) =
2091 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2093 if (!PropagateShadow || Overflow) {
2095 EntryIRB.CreateMemSet(
2099 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2101 [[maybe_unused]]
Value *Cpy = EntryIRB.CreateMemCpy(
2102 CpShadowPtr, CopyAlign,
Base, CopyAlign,
Size);
2105 if (MS.TrackOrigins) {
2106 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2110 EntryIRB.CreateMemCpy(
2119 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2120 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2121 ShadowPtr = getCleanShadow(V);
2122 setOrigin(
A, getCleanOrigin());
2125 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2126 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2128 if (MS.TrackOrigins) {
2129 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2130 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2134 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2140 assert(ShadowPtr &&
"Could not find shadow for an argument");
2147 cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
2148 PoisonUndefVectors) {
2151 for (
unsigned i = 0; i != NumElems; ++i) {
2154 : getCleanShadow(Elem);
2158 LLVM_DEBUG(
dbgs() <<
"Partial undef constant vector: " << *V <<
" ==> "
2159 << *ShadowConstant <<
"\n");
2161 return ShadowConstant;
2167 return getCleanShadow(V);
2171 Value *getShadow(Instruction *
I,
int i) {
2172 return getShadow(
I->getOperand(i));
2177 if (!MS.TrackOrigins)
2180 return getCleanOrigin();
2182 "Unexpected value type in getOrigin()");
2184 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2185 return getCleanOrigin();
2187 Value *Origin = OriginMap[
V];
2188 assert(Origin &&
"Missing origin");
2193 Value *getOrigin(Instruction *
I,
int i) {
2194 return getOrigin(
I->getOperand(i));
2201 void insertCheckShadow(
Value *Shadow,
Value *Origin, Instruction *OrigIns) {
2207 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2208 << *OrigIns <<
"\n");
2213 if (isScalableNonVectorType(ShadowTy)) {
2214 LLVM_DEBUG(
dbgs() <<
"Skipping check of scalable non-vector " << *Shadow
2215 <<
" before " << *OrigIns <<
"\n");
2221 "Can only insert checks for integer, vector, and aggregate shadow "
2224 InstrumentationList.push_back(
2225 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2233 void insertCheckShadowOf(
Value *Val, Instruction *OrigIns) {
2235 Value *Shadow, *Origin;
2237 Shadow = getShadow(Val);
2240 Origin = getOrigin(Val);
2247 insertCheckShadow(Shadow, Origin, OrigIns);
2252 case AtomicOrdering::NotAtomic:
2253 return AtomicOrdering::NotAtomic;
2254 case AtomicOrdering::Unordered:
2255 case AtomicOrdering::Monotonic:
2256 case AtomicOrdering::Release:
2257 return AtomicOrdering::Release;
2258 case AtomicOrdering::Acquire:
2259 case AtomicOrdering::AcquireRelease:
2260 return AtomicOrdering::AcquireRelease;
2261 case AtomicOrdering::SequentiallyConsistent:
2262 return AtomicOrdering::SequentiallyConsistent;
2268 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2269 uint32_t OrderingTable[NumOrderings] = {};
2271 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2272 OrderingTable[(
int)AtomicOrderingCABI::release] =
2273 (int)AtomicOrderingCABI::release;
2274 OrderingTable[(int)AtomicOrderingCABI::consume] =
2275 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2276 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2277 (
int)AtomicOrderingCABI::acq_rel;
2278 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2279 (
int)AtomicOrderingCABI::seq_cst;
2286 case AtomicOrdering::NotAtomic:
2287 return AtomicOrdering::NotAtomic;
2288 case AtomicOrdering::Unordered:
2289 case AtomicOrdering::Monotonic:
2290 case AtomicOrdering::Acquire:
2291 return AtomicOrdering::Acquire;
2292 case AtomicOrdering::Release:
2293 case AtomicOrdering::AcquireRelease:
2294 return AtomicOrdering::AcquireRelease;
2295 case AtomicOrdering::SequentiallyConsistent:
2296 return AtomicOrdering::SequentiallyConsistent;
2302 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2303 uint32_t OrderingTable[NumOrderings] = {};
2305 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2306 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2307 OrderingTable[(int)AtomicOrderingCABI::consume] =
2308 (
int)AtomicOrderingCABI::acquire;
2309 OrderingTable[(int)AtomicOrderingCABI::release] =
2310 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2311 (int)AtomicOrderingCABI::acq_rel;
2312 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2313 (
int)AtomicOrderingCABI::seq_cst;
2319 using InstVisitor<MemorySanitizerVisitor>
::visit;
2320 void visit(Instruction &
I) {
2321 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2324 if (isInPrologue(
I))
2329 setShadow(&
I, getCleanShadow(&
I));
2330 setOrigin(&
I, getCleanOrigin());
2341 void visitLoadInst(LoadInst &
I) {
2342 assert(
I.getType()->isSized() &&
"Load type must have size");
2343 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2344 NextNodeIRBuilder IRB(&
I);
2345 Type *ShadowTy = getShadowTy(&
I);
2346 Value *Addr =
I.getPointerOperand();
2347 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2348 const Align Alignment =
I.getAlign();
2349 if (PropagateShadow) {
2350 std::tie(ShadowPtr, OriginPtr) =
2351 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
2355 setShadow(&
I, getCleanShadow(&
I));
2359 insertCheckShadowOf(
I.getPointerOperand(), &
I);
2364 if (MS.TrackOrigins) {
2365 if (PropagateShadow) {
2370 setOrigin(&
I, getCleanOrigin());
2379 void visitStoreInst(StoreInst &
I) {
2380 StoreList.push_back(&
I);
2382 insertCheckShadowOf(
I.getPointerOperand(), &
I);
2385 void handleCASOrRMW(Instruction &
I) {
2389 Value *Addr =
I.getOperand(0);
2390 Value *Val =
I.getOperand(1);
2391 Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val),
Align(1),
2396 insertCheckShadowOf(Addr, &
I);
2402 insertCheckShadowOf(Val, &
I);
2406 setShadow(&
I, getCleanShadow(&
I));
2407 setOrigin(&
I, getCleanOrigin());
2410 void visitAtomicRMWInst(AtomicRMWInst &
I) {
2415 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2421 void visitExtractElementInst(ExtractElementInst &
I) {
2422 insertCheckShadowOf(
I.getOperand(1), &
I);
2426 setOrigin(&
I, getOrigin(&
I, 0));
2429 void visitInsertElementInst(InsertElementInst &
I) {
2430 insertCheckShadowOf(
I.getOperand(2), &
I);
2432 auto *Shadow0 = getShadow(&
I, 0);
2433 auto *Shadow1 = getShadow(&
I, 1);
2436 setOriginForNaryOp(
I);
2439 void visitShuffleVectorInst(ShuffleVectorInst &
I) {
2441 auto *Shadow0 = getShadow(&
I, 0);
2442 auto *Shadow1 = getShadow(&
I, 1);
2445 setOriginForNaryOp(
I);
2449 void visitSExtInst(SExtInst &
I) {
2451 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2452 setOrigin(&
I, getOrigin(&
I, 0));
2455 void visitZExtInst(ZExtInst &
I) {
2457 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2458 setOrigin(&
I, getOrigin(&
I, 0));
2461 void visitTruncInst(TruncInst &
I) {
2463 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2464 setOrigin(&
I, getOrigin(&
I, 0));
2467 void visitBitCastInst(BitCastInst &
I) {
2472 if (CI->isMustTailCall())
2476 setOrigin(&
I, getOrigin(&
I, 0));
2479 void visitPtrToIntInst(PtrToIntInst &
I) {
2482 "_msprop_ptrtoint"));
2483 setOrigin(&
I, getOrigin(&
I, 0));
2486 void visitIntToPtrInst(IntToPtrInst &
I) {
2489 "_msprop_inttoptr"));
2490 setOrigin(&
I, getOrigin(&
I, 0));
2493 void visitFPToSIInst(CastInst &
I) { handleShadowOr(
I); }
2494 void visitFPToUIInst(CastInst &
I) { handleShadowOr(
I); }
2495 void visitSIToFPInst(CastInst &
I) { handleShadowOr(
I); }
2496 void visitUIToFPInst(CastInst &
I) { handleShadowOr(
I); }
2497 void visitFPExtInst(CastInst &
I) { handleShadowOr(
I); }
2498 void visitFPTruncInst(CastInst &
I) { handleShadowOr(
I); }
2505 void visitAnd(BinaryOperator &
I) {
2513 Value *S2 = getShadow(&
I, 1);
2514 Value *V1 =
I.getOperand(0);
2515 Value *V2 =
I.getOperand(1);
2523 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2524 setOriginForNaryOp(
I);
2527 void visitOr(BinaryOperator &
I) {
2540 Value *S2 = getShadow(&
I, 1);
2541 Value *V1 =
I.getOperand(0);
2542 Value *V2 =
I.getOperand(1);
2561 S = IRB.
CreateOr(S, DisjointOrShadow,
"_ms_disjoint");
2565 setOriginForNaryOp(
I);
2583 template <
bool CombineShadow>
class Combiner {
2584 Value *Shadow =
nullptr;
2585 Value *Origin =
nullptr;
2587 MemorySanitizerVisitor *MSV;
2590 Combiner(MemorySanitizerVisitor *MSV,
IRBuilder<> &IRB)
2591 : IRB(IRB), MSV(MSV) {}
2595 if (CombineShadow) {
2600 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2601 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2605 if (MSV->MS.TrackOrigins) {
2612 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2613 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2623 Value *OpShadow = MSV->getShadow(V);
2624 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2625 return Add(OpShadow, OpOrigin);
2630 void Done(Instruction *
I) {
2631 if (CombineShadow) {
2633 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2634 MSV->setShadow(
I, Shadow);
2636 if (MSV->MS.TrackOrigins) {
2638 MSV->setOrigin(
I, Origin);
2644 void DoneAndStoreOrigin(TypeSize TS,
Value *OriginPtr) {
2645 if (MSV->MS.TrackOrigins) {
2652 using ShadowAndOriginCombiner = Combiner<true>;
2653 using OriginCombiner = Combiner<false>;
2656 void setOriginForNaryOp(Instruction &
I) {
2657 if (!MS.TrackOrigins)
2660 OriginCombiner OC(
this, IRB);
2661 for (Use &
Op :
I.operands())
2666 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2668 "Vector of pointers is not a valid shadow type");
2678 Type *srcTy =
V->getType();
2681 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2682 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2683 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2701 Type *ShadowTy = getShadowTy(V);
2702 if (
V->getType() == ShadowTy)
2704 if (
V->getType()->isPtrOrPtrVectorTy())
2711 void handleShadowOr(Instruction &
I) {
2713 ShadowAndOriginCombiner SC(
this, IRB);
2714 for (Use &
Op :
I.operands())
2741 Value *horizontalReduce(IntrinsicInst &
I,
unsigned ReductionFactor,
2742 unsigned Shards,
Value *VectorA,
Value *VectorB) {
2747 [[maybe_unused]]
unsigned TotalNumElems = NumElems;
2753 assert(NumElems % (ReductionFactor * Shards) == 0);
2758 for (
unsigned i = 0; i < ReductionFactor; i++) {
2759 SmallVector<int, 16>
Mask;
2761 for (
unsigned j = 0;
j < Shards;
j++) {
2762 unsigned Offset = NumElems / Shards *
j;
2764 for (
unsigned X = 0;
X < NumElems / Shards;
X += ReductionFactor)
2768 for (
unsigned X = 0;
X < NumElems / Shards;
X += ReductionFactor)
2793 void handlePairwiseShadowOrIntrinsic(IntrinsicInst &
I,
unsigned Shards) {
2794 assert(
I.arg_size() == 1 ||
I.arg_size() == 2);
2796 assert(
I.getType()->isVectorTy());
2797 assert(
I.getArgOperand(0)->getType()->isVectorTy());
2799 [[maybe_unused]] FixedVectorType *ParamType =
2803 [[maybe_unused]] FixedVectorType *
ReturnType =
2811 Value *FirstArgShadow = getShadow(&
I, 0);
2812 Value *SecondArgShadow =
nullptr;
2813 if (
I.arg_size() == 2)
2814 SecondArgShadow = getShadow(&
I, 1);
2816 Value *OrShadow = horizontalReduce(
I, 2, Shards,
2817 FirstArgShadow, SecondArgShadow);
2819 OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&
I));
2821 setShadow(&
I, OrShadow);
2822 setOriginForNaryOp(
I);
2832 void handlePairwiseShadowOrIntrinsic(IntrinsicInst &
I,
unsigned Shards,
2833 int ReinterpretElemWidth) {
2834 assert(
I.arg_size() == 1 ||
I.arg_size() == 2);
2836 assert(
I.getType()->isVectorTy());
2837 assert(
I.getArgOperand(0)->getType()->isVectorTy());
2839 FixedVectorType *ParamType =
2844 [[maybe_unused]] FixedVectorType *
ReturnType =
2851 FixedVectorType *ReinterpretShadowTy =
nullptr;
2859 Value *FirstArgShadow = getShadow(&
I, 0);
2860 FirstArgShadow = IRB.
CreateBitCast(FirstArgShadow, ReinterpretShadowTy);
2870 Value *SecondArgShadow =
nullptr;
2871 if (
I.arg_size() == 2) {
2872 SecondArgShadow = getShadow(&
I, 1);
2873 SecondArgShadow = IRB.
CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
2876 Value *OrShadow = horizontalReduce(
I, 2, Shards,
2877 FirstArgShadow, SecondArgShadow);
2879 OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&
I));
2881 setShadow(&
I, OrShadow);
2882 setOriginForNaryOp(
I);
2885 void visitFNeg(UnaryOperator &
I) { handleShadowOr(
I); }
2896 void handleMulByConstant(BinaryOperator &
I, Constant *ConstArg,
2902 Type *EltTy = VTy->getElementType();
2904 for (
unsigned Idx = 0; Idx < NumElements; ++Idx) {
2905 if (ConstantInt *Elt =
2907 const APInt &
V = Elt->getValue();
2908 APInt V2 = APInt(
V.getBitWidth(), 1) <<
V.countr_zero();
2909 Elements.push_back(ConstantInt::get(EltTy, V2));
2911 Elements.push_back(ConstantInt::get(EltTy, 1));
2917 const APInt &
V = Elt->getValue();
2918 APInt V2 = APInt(
V.getBitWidth(), 1) <<
V.countr_zero();
2919 ShadowMul = ConstantInt::get(Ty, V2);
2921 ShadowMul = ConstantInt::get(Ty, 1);
2927 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2928 setOrigin(&
I, getOrigin(OtherArg));
2931 void visitMul(BinaryOperator &
I) {
2934 if (constOp0 && !constOp1)
2935 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2936 else if (constOp1 && !constOp0)
2937 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2942 void visitFAdd(BinaryOperator &
I) { handleShadowOr(
I); }
2943 void visitFSub(BinaryOperator &
I) { handleShadowOr(
I); }
2944 void visitFMul(BinaryOperator &
I) { handleShadowOr(
I); }
2945 void visitAdd(BinaryOperator &
I) { handleShadowOr(
I); }
2946 void visitSub(BinaryOperator &
I) { handleShadowOr(
I); }
2947 void visitXor(BinaryOperator &
I) { handleShadowOr(
I); }
2949 void handleIntegerDiv(Instruction &
I) {
2952 insertCheckShadowOf(
I.getOperand(1), &
I);
2953 setShadow(&
I, getShadow(&
I, 0));
2954 setOrigin(&
I, getOrigin(&
I, 0));
2957 void visitUDiv(BinaryOperator &
I) { handleIntegerDiv(
I); }
2958 void visitSDiv(BinaryOperator &
I) { handleIntegerDiv(
I); }
2959 void visitURem(BinaryOperator &
I) { handleIntegerDiv(
I); }
2960 void visitSRem(BinaryOperator &
I) { handleIntegerDiv(
I); }
2964 void visitFDiv(BinaryOperator &
I) { handleShadowOr(
I); }
2965 void visitFRem(BinaryOperator &
I) { handleShadowOr(
I); }
2971 void handleEqualityComparison(ICmpInst &
I) {
2975 Value *Sa = getShadow(
A);
2976 Value *Sb = getShadow(
B);
3002 setOriginForNaryOp(
I);
3010 void handleRelationalComparisonExact(ICmpInst &
I) {
3014 Value *Sa = getShadow(
A);
3015 Value *Sb = getShadow(
B);
3026 bool IsSigned =
I.isSigned();
3028 auto GetMinMaxUnsigned = [&](
Value *
V,
Value *S) {
3038 V = IRB.
CreateXor(V, ConstantInt::get(
V->getType(), MinVal));
3043 return std::make_pair(Min, Max);
3046 auto [Amin, Amax] = GetMinMaxUnsigned(
A, Sa);
3047 auto [Bmin, Bmax] = GetMinMaxUnsigned(
B, Sb);
3053 setOriginForNaryOp(
I);
3060 void handleSignedRelationalComparison(ICmpInst &
I) {
3065 op =
I.getOperand(0);
3066 pre =
I.getPredicate();
3068 op =
I.getOperand(1);
3069 pre =
I.getSwappedPredicate();
3082 setShadow(&
I, Shadow);
3083 setOrigin(&
I, getOrigin(
op));
3089 void visitICmpInst(ICmpInst &
I) {
3094 if (
I.isEquality()) {
3095 handleEqualityComparison(
I);
3101 handleRelationalComparisonExact(
I);
3105 handleSignedRelationalComparison(
I);
3111 handleRelationalComparisonExact(
I);
3118 void visitFCmpInst(FCmpInst &
I) { handleShadowOr(
I); }
3120 void handleShift(BinaryOperator &
I) {
3125 Value *S2 = getShadow(&
I, 1);
3128 Value *V2 =
I.getOperand(1);
3130 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3131 setOriginForNaryOp(
I);
3134 void visitShl(BinaryOperator &
I) { handleShift(
I); }
3135 void visitAShr(BinaryOperator &
I) { handleShift(
I); }
3136 void visitLShr(BinaryOperator &
I) { handleShift(
I); }
3138 void handleFunnelShift(IntrinsicInst &
I) {
3142 Value *S0 = getShadow(&
I, 0);
3144 Value *S2 = getShadow(&
I, 2);
3147 Value *V2 =
I.getOperand(2);
3150 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3151 setOriginForNaryOp(
I);
3164 void visitMemMoveInst(MemMoveInst &
I) {
3165 getShadow(
I.getArgOperand(1));
3168 {I.getArgOperand(0), I.getArgOperand(1),
3169 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3187 void visitMemCpyInst(MemCpyInst &
I) {
3188 getShadow(
I.getArgOperand(1));
3191 {I.getArgOperand(0), I.getArgOperand(1),
3192 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3197 void visitMemSetInst(MemSetInst &
I) {
3201 {I.getArgOperand(0),
3202 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
3203 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3207 void visitVAStartInst(VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
3209 void visitVACopyInst(VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
3215 bool handleVectorStoreIntrinsic(IntrinsicInst &
I) {
3219 Value *Addr =
I.getArgOperand(0);
3220 Value *Shadow = getShadow(&
I, 1);
3221 Value *ShadowPtr, *OriginPtr;
3225 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3230 insertCheckShadowOf(Addr, &
I);
3233 if (MS.TrackOrigins)
3242 bool handleVectorLoadIntrinsic(IntrinsicInst &
I) {
3246 Value *Addr =
I.getArgOperand(0);
3248 Type *ShadowTy = getShadowTy(&
I);
3249 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
3250 if (PropagateShadow) {
3254 std::tie(ShadowPtr, OriginPtr) =
3255 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
3259 setShadow(&
I, getCleanShadow(&
I));
3263 insertCheckShadowOf(Addr, &
I);
3265 if (MS.TrackOrigins) {
3266 if (PropagateShadow)
3267 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
3269 setOrigin(&
I, getCleanOrigin());
3289 [[maybe_unused]]
bool
3290 maybeHandleSimpleNomemIntrinsic(IntrinsicInst &
I,
3291 unsigned int trailingFlags) {
3292 Type *RetTy =
I.getType();
3296 unsigned NumArgOperands =
I.arg_size();
3297 assert(NumArgOperands >= trailingFlags);
3298 for (
unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
3299 Type *Ty =
I.getArgOperand(i)->getType();
3305 ShadowAndOriginCombiner SC(
this, IRB);
3306 for (
unsigned i = 0; i < NumArgOperands; ++i)
3307 SC.Add(
I.getArgOperand(i));
3324 bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &
I) {
3325 unsigned NumArgOperands =
I.arg_size();
3326 if (NumArgOperands == 0)
3329 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3330 I.getArgOperand(1)->getType()->isVectorTy() &&
3331 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
3333 return handleVectorStoreIntrinsic(
I);
3336 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3337 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3339 return handleVectorLoadIntrinsic(
I);
3342 if (
I.doesNotAccessMemory())
3343 if (maybeHandleSimpleNomemIntrinsic(
I, 0))
3351 bool maybeHandleUnknownIntrinsic(IntrinsicInst &
I) {
3352 if (maybeHandleUnknownIntrinsicUnlogged(
I)) {
3356 LLVM_DEBUG(
dbgs() <<
"UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " <<
I
3363 void handleInvariantGroup(IntrinsicInst &
I) {
3364 setShadow(&
I, getShadow(&
I, 0));
3365 setOrigin(&
I, getOrigin(&
I, 0));
3368 void handleLifetimeStart(IntrinsicInst &
I) {
3373 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3376 void handleBswap(IntrinsicInst &
I) {
3379 Type *OpType =
Op->getType();
3382 setOrigin(&
I, getOrigin(
Op));
3403 void handleCountLeadingTrailingZeros(IntrinsicInst &
I) {
3405 Value *Src =
I.getArgOperand(0);
3406 Value *SrcShadow = getShadow(Src);
3410 I.getType(),
I.getIntrinsicID(), {Src, False});
3412 I.getType(),
I.getIntrinsicID(), {SrcShadow, False});
3415 ConcreteZerosCount, ShadowZerosCount,
"_mscz_cmp_zeros");
3417 Value *NotAllZeroShadow =
3419 Value *OutputShadow =
3420 IRB.
CreateAnd(CompareConcreteZeros, NotAllZeroShadow,
"_mscz_main");
3426 OutputShadow = IRB.
CreateOr(OutputShadow, BoolZeroPoison,
"_mscz_bs");
3429 OutputShadow = IRB.
CreateSExt(OutputShadow, getShadowTy(Src),
"_mscz_os");
3431 setShadow(&
I, OutputShadow);
3432 setOriginForNaryOp(
I);
3442 void handleNEONVectorConvertIntrinsic(IntrinsicInst &
I) {
3446 Value *S0 = getShadow(&
I, 0);
3455 setShadow(&
I, OutShadow);
3456 setOriginForNaryOp(
I);
3465 FixedVectorType *maybeShrinkVectorShadowType(
Value *Src, IntrinsicInst &
I) {
3485 Value *maybeExtendVectorShadowWithZeros(
Value *Shadow, IntrinsicInst &
I) {
3490 Value *FullShadow = getCleanShadow(&
I);
3491 unsigned ShadowNumElems =
3493 unsigned FullShadowNumElems =
3496 assert((ShadowNumElems == FullShadowNumElems) ||
3497 (ShadowNumElems * 2 == FullShadowNumElems));
3499 if (ShadowNumElems == FullShadowNumElems) {
3500 FullShadow = Shadow;
3504 std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
3529 void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &
I,
3530 bool HasRoundingMode) {
3531 if (HasRoundingMode) {
3539 Value *Src =
I.getArgOperand(0);
3540 assert(Src->getType()->isVectorTy());
3544 VectorType *ShadowType = maybeShrinkVectorShadowType(Src,
I);
3547 Value *S0 = getShadow(&
I, 0);
3559 Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow,
I);
3561 setShadow(&
I, FullShadow);
3562 setOriginForNaryOp(
I);
3583 void handleSSEVectorConvertIntrinsic(IntrinsicInst &
I,
int NumUsedElements,
3584 bool HasRoundingMode =
false) {
3586 Value *CopyOp, *ConvertOp;
3588 assert((!HasRoundingMode ||
3590 "Invalid rounding mode");
3592 switch (
I.arg_size() - HasRoundingMode) {
3594 CopyOp =
I.getArgOperand(0);
3595 ConvertOp =
I.getArgOperand(1);
3598 ConvertOp =
I.getArgOperand(0);
3612 Value *ConvertShadow = getShadow(ConvertOp);
3613 Value *AggShadow =
nullptr;
3616 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3617 for (
int i = 1; i < NumUsedElements; ++i) {
3619 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3620 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3623 AggShadow = ConvertShadow;
3626 insertCheckShadow(AggShadow, getOrigin(ConvertOp), &
I);
3633 Value *ResultShadow = getShadow(CopyOp);
3635 for (
int i = 0; i < NumUsedElements; ++i) {
3637 ResultShadow, ConstantInt::getNullValue(EltTy),
3640 setShadow(&
I, ResultShadow);
3641 setOrigin(&
I, getOrigin(CopyOp));
3643 setShadow(&
I, getCleanShadow(&
I));
3644 setOrigin(&
I, getCleanOrigin());
3652 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3655 return CreateShadowCast(IRB, S2,
T,
true);
3663 return CreateShadowCast(IRB, S2,
T,
true);
3680 void handleVectorShiftIntrinsic(IntrinsicInst &
I,
bool Variable) {
3686 Value *S2 = getShadow(&
I, 1);
3688 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3689 Value *V1 =
I.getOperand(0);
3690 Value *V2 =
I.getOperand(1);
3692 {IRB.CreateBitCast(S1, V1->getType()), V2});
3694 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3695 setOriginForNaryOp(
I);
3700 Type *getMMXVectorTy(
unsigned EltSizeInBits,
3701 unsigned X86_MMXSizeInBits = 64) {
3702 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3703 "Illegal MMX vector element size");
3705 X86_MMXSizeInBits / EltSizeInBits);
3712 case Intrinsic::x86_sse2_packsswb_128:
3713 case Intrinsic::x86_sse2_packuswb_128:
3714 return Intrinsic::x86_sse2_packsswb_128;
3716 case Intrinsic::x86_sse2_packssdw_128:
3717 case Intrinsic::x86_sse41_packusdw:
3718 return Intrinsic::x86_sse2_packssdw_128;
3720 case Intrinsic::x86_avx2_packsswb:
3721 case Intrinsic::x86_avx2_packuswb:
3722 return Intrinsic::x86_avx2_packsswb;
3724 case Intrinsic::x86_avx2_packssdw:
3725 case Intrinsic::x86_avx2_packusdw:
3726 return Intrinsic::x86_avx2_packssdw;
3728 case Intrinsic::x86_mmx_packsswb:
3729 case Intrinsic::x86_mmx_packuswb:
3730 return Intrinsic::x86_mmx_packsswb;
3732 case Intrinsic::x86_mmx_packssdw:
3733 return Intrinsic::x86_mmx_packssdw;
3735 case Intrinsic::x86_avx512_packssdw_512:
3736 case Intrinsic::x86_avx512_packusdw_512:
3737 return Intrinsic::x86_avx512_packssdw_512;
3739 case Intrinsic::x86_avx512_packsswb_512:
3740 case Intrinsic::x86_avx512_packuswb_512:
3741 return Intrinsic::x86_avx512_packsswb_512;
3757 void handleVectorPackIntrinsic(IntrinsicInst &
I,
3758 unsigned MMXEltSizeInBits = 0) {
3762 Value *S2 = getShadow(&
I, 1);
3763 assert(
S1->getType()->isVectorTy());
3769 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) :
S1->
getType();
3770 if (MMXEltSizeInBits) {
3778 if (MMXEltSizeInBits) {
3784 {S1_ext, S2_ext},
nullptr,
3785 "_msprop_vector_pack");
3786 if (MMXEltSizeInBits)
3789 setOriginForNaryOp(
I);
3793 Constant *createDppMask(
unsigned Width,
unsigned Mask) {
3806 const unsigned Width =
3813 Value *DstMaskV = createDppMask(Width, DstMask);
3830 void handleDppIntrinsic(IntrinsicInst &
I) {
3833 Value *S0 = getShadow(&
I, 0);
3837 const unsigned Width =
3839 assert(Width == 2 || Width == 4 || Width == 8);
3842 const unsigned SrcMask =
Mask >> 4;
3843 const unsigned DstMask =
Mask & 0xf;
3846 Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3851 SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3858 setOriginForNaryOp(
I);
3862 C = CreateAppToShadowCast(IRB,
C);
3871 void handleBlendvIntrinsic(IntrinsicInst &
I) {
3876 Value *Sc = getShadow(&
I, 2);
3877 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
3882 C = convertBlendvToSelectMask(IRB,
C);
3883 Sc = convertBlendvToSelectMask(IRB, Sc);
3889 handleSelectLikeInst(
I,
C,
T,
F);
3893 void handleVectorSadIntrinsic(IntrinsicInst &
I,
bool IsMMX =
false) {
3894 const unsigned SignificantBitsPerResultElement = 16;
3896 unsigned ZeroBitsPerResultElement =
3900 auto *Shadow0 = getShadow(&
I, 0);
3901 auto *Shadow1 = getShadow(&
I, 1);
3906 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3909 setOriginForNaryOp(
I);
3931 void handleVectorPmaddIntrinsic(IntrinsicInst &
I,
unsigned ReductionFactor,
3933 unsigned EltSizeInBits = 0) {
3936 [[maybe_unused]] FixedVectorType *
ReturnType =
3941 Value *Va =
nullptr;
3942 Value *Vb =
nullptr;
3943 Value *Sa =
nullptr;
3944 Value *Sb =
nullptr;
3946 assert(
I.arg_size() == 2 ||
I.arg_size() == 3);
3947 if (
I.arg_size() == 2) {
3948 Va =
I.getOperand(0);
3949 Vb =
I.getOperand(1);
3951 Sa = getShadow(&
I, 0);
3952 Sb = getShadow(&
I, 1);
3953 }
else if (
I.arg_size() == 3) {
3955 Va =
I.getOperand(1);
3956 Vb =
I.getOperand(2);
3958 Sa = getShadow(&
I, 1);
3959 Sb = getShadow(&
I, 2);
3968 if (
I.arg_size() == 3) {
3969 [[maybe_unused]]
auto *AccumulatorType =
3971 assert(AccumulatorType == ReturnType);
3974 FixedVectorType *ImplicitReturnType =
3977 if (EltSizeInBits) {
3979 getMMXVectorTy(EltSizeInBits * ReductionFactor,
3991 ReturnType->getNumElements() * ReductionFactor);
4013 VaInt = CreateAppToShadowCast(IRB, Va);
4014 VbInt = CreateAppToShadowCast(IRB, Vb);
4024 And = IRB.
CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});
4046 ImplicitReturnType);
4051 OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&
I));
4054 if (
I.arg_size() == 3)
4055 OutShadow = IRB.
CreateOr(OutShadow, getShadow(&
I, 0));
4057 setShadow(&
I, OutShadow);
4058 setOriginForNaryOp(
I);
4064 void handleVectorComparePackedIntrinsic(IntrinsicInst &
I) {
4066 Type *ResTy = getShadowTy(&
I);
4067 auto *Shadow0 = getShadow(&
I, 0);
4068 auto *Shadow1 = getShadow(&
I, 1);
4073 setOriginForNaryOp(
I);
4079 void handleVectorCompareScalarIntrinsic(IntrinsicInst &
I) {
4081 auto *Shadow0 = getShadow(&
I, 0);
4082 auto *Shadow1 = getShadow(&
I, 1);
4084 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
4086 setOriginForNaryOp(
I);
4095 void handleVectorReduceIntrinsic(IntrinsicInst &
I,
bool AllowShadowCast) {
4100 if (AllowShadowCast)
4101 S = CreateShadowCast(IRB, S, getShadowTy(&
I));
4105 setOriginForNaryOp(
I);
4115 void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &
I) {
4119 Value *Shadow0 = getShadow(&
I, 0);
4125 setOriginForNaryOp(
I);
4131 void handleVectorReduceOrIntrinsic(IntrinsicInst &
I) {
4135 Value *OperandShadow = getShadow(&
I, 0);
4137 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
4145 setOrigin(&
I, getOrigin(&
I, 0));
4151 void handleVectorReduceAndIntrinsic(IntrinsicInst &
I) {
4155 Value *OperandShadow = getShadow(&
I, 0);
4156 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
4164 setOrigin(&
I, getOrigin(&
I, 0));
4167 void handleStmxcsr(IntrinsicInst &
I) {
4169 Value *Addr =
I.getArgOperand(0);
4172 getShadowOriginPtr(Addr, IRB, Ty,
Align(1),
true).first;
4177 insertCheckShadowOf(Addr, &
I);
4180 void handleLdmxcsr(IntrinsicInst &
I) {
4185 Value *Addr =
I.getArgOperand(0);
4188 Value *ShadowPtr, *OriginPtr;
4189 std::tie(ShadowPtr, OriginPtr) =
4190 getShadowOriginPtr(Addr, IRB, Ty, Alignment,
false);
4193 insertCheckShadowOf(Addr, &
I);
4196 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
4198 insertCheckShadow(Shadow, Origin, &
I);
4201 void handleMaskedExpandLoad(IntrinsicInst &
I) {
4203 Value *Ptr =
I.getArgOperand(0);
4204 MaybeAlign
Align =
I.getParamAlign(0);
4206 Value *PassThru =
I.getArgOperand(2);
4209 insertCheckShadowOf(Ptr, &
I);
4210 insertCheckShadowOf(Mask, &
I);
4213 if (!PropagateShadow) {
4214 setShadow(&
I, getCleanShadow(&
I));
4215 setOrigin(&
I, getCleanOrigin());
4219 Type *ShadowTy = getShadowTy(&
I);
4221 auto [ShadowPtr, OriginPtr] =
4222 getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align,
false);
4226 getShadow(PassThru),
"_msmaskedexpload");
4228 setShadow(&
I, Shadow);
4231 setOrigin(&
I, getCleanOrigin());
4234 void handleMaskedCompressStore(IntrinsicInst &
I) {
4236 Value *Values =
I.getArgOperand(0);
4237 Value *Ptr =
I.getArgOperand(1);
4238 MaybeAlign
Align =
I.getParamAlign(1);
4242 insertCheckShadowOf(Ptr, &
I);
4243 insertCheckShadowOf(Mask, &
I);
4246 Value *Shadow = getShadow(Values);
4247 Type *ElementShadowTy =
4249 auto [ShadowPtr, OriginPtrs] =
4250 getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align,
true);
4257 void handleMaskedGather(IntrinsicInst &
I) {
4259 Value *Ptrs =
I.getArgOperand(0);
4260 const Align Alignment =
I.getParamAlign(0).valueOrOne();
4262 Value *PassThru =
I.getArgOperand(2);
4264 Type *PtrsShadowTy = getShadowTy(Ptrs);
4266 insertCheckShadowOf(Mask, &
I);
4270 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4273 if (!PropagateShadow) {
4274 setShadow(&
I, getCleanShadow(&
I));
4275 setOrigin(&
I, getCleanOrigin());
4279 Type *ShadowTy = getShadowTy(&
I);
4281 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4282 Ptrs, IRB, ElementShadowTy, Alignment,
false);
4286 getShadow(PassThru),
"_msmaskedgather");
4288 setShadow(&
I, Shadow);
4291 setOrigin(&
I, getCleanOrigin());
4294 void handleMaskedScatter(IntrinsicInst &
I) {
4296 Value *Values =
I.getArgOperand(0);
4297 Value *Ptrs =
I.getArgOperand(1);
4298 const Align Alignment =
I.getParamAlign(1).valueOrOne();
4301 Type *PtrsShadowTy = getShadowTy(Ptrs);
4303 insertCheckShadowOf(Mask, &
I);
4307 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4310 Value *Shadow = getShadow(Values);
4311 Type *ElementShadowTy =
4313 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4314 Ptrs, IRB, ElementShadowTy, Alignment,
true);
  void handleMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *V = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    Value *Mask = I.getArgOperand(2);
    Value *Shadow = getShadow(V);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *ShadowPtr;
    Value *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);

    IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,
                DL.getTypeStoreSize(Shadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }

  void handleMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore*/ false);
    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                       getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
      return;

    // Choose between PassThru's origin and the origin loaded from shadow
    // memory, depending on whether PassThru's (masked) shadow is non-zero.
    // ...
    Value *MaskedPassThruShadow = IRB.CreateAnd(
        getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));

    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");

    Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
    Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);

    setOrigin(&I, Origin);
  }
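  // Roughly, for
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //                                       <4 x i1> %mask, <4 x i32> %passthru)
  // the code above issues a second masked load over the shadow:
  //   %_msmaskedld = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %sp, i32 4,
  //                                       <4 x i1> %mask, <4 x i32> %pt_shadow)
  // so lanes with the mask off inherit the pass-through shadow verbatim, and
  // the origin is chosen between PassThru's and the loaded memory's.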
  void handleAVXMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    Value *Src = I.getArgOperand(2);

    const Align Alignment = Align(1);

    Value *SrcShadow = getShadow(Src);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Dst, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    // The intrinsic may require a floating-point operand, but shadow bit
    // patterns are arbitrary; bitcast the shadow into the source's type.
    ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (!MS.TrackOrigins)
      return;

    // Approximation only: the masked-off bytes keep their old origin.
    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }

  void handleAVXMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    const Align Alignment = Align(1);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Mask, &I);

    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore*/ false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));

    if (!MS.TrackOrigins)
      return;

    // The masked-off lanes carry the (clean) pass-through value, so using the
    // source's origin for the whole result is an approximation.
    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
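  // Note the contrast with handleMaskedLoad/handleMaskedStore above: the AVX
  // masked load/store intrinsics take an integer vector mask rather than an
  // <N x i1>, so rather than re-deriving a bit mask we simply re-issue the
  // same intrinsic over shadow memory and let it select the same lanes.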
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize =
        cast<FixedVectorType>(Idx->getType())->getNumElements();
    // ... (mask off the index bits that cannot affect the permutation)
    auto *IdxShadow = getShadow(Idx);
    // ...
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }

  // Instrument AVX permutation intrinsics (e.g., llvm.x86.avx.vpermilvar.*).
  void handleAVXVpermilvar(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {Shadow, I.getArgOperand(1)});
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }

  // Instrument AVX512 two-source permutation intrinsics
  // (llvm.x86.avx512.vpermi2var.*).
  void handleAVXVpermi2var(IntrinsicInst &I) {
    assert(I.arg_size() == 3);
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    IRBuilder<> IRB(&I);
    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {AShadow, Idx, BShadow});
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }

  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }
  // Instrument AVX512 FP-to-integer conversions that carry a write-through
  // operand and a mask. If LastMask is set the mask is the final operand;
  // otherwise it directly follows the write-through operand.
  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }

    assert(isFixedFPVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    assert(Mask->getType()->isIntegerTy());
    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);

    assert(WriteThruNumElements == MaskNumElements);

    insertCheckShadowOf(Mask, &I);

    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // ... (line the shadow up with the wider mask; "_ms_mask_bitcast")
    }

    // ...
    Value *AShadowConverted =
        IRB.CreateBitCast(AShadow, getShadowTy(&I), "_ms_a_shadow");

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(
        IRB.CreateBitCast(
            Mask, FixedVectorType::get(IRB.getInt1Ty(), MaskNumElements)),
        AShadowConverted, WriteThroughShadow, "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Instrument BMI / BMI2 intrinsics.
  // All of these intrinsics have the form Z = I(X, Y), where the mask operand
  // Y determines which bits of X matter.
  void handleBmiIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);

    // If any bit of the mask operand is poisoned, then the whole thing is.
    Value *SMask = getShadow(&I, 1);
    SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
                           ShadowTy);
    // Apply the intrinsic to the first operand's shadow and the real mask.
    Value *S = IRB.CreateCall(I.getCalledFunction(),
                              {getShadow(&I, 0), I.getOperand(1)});
    S = IRB.CreateOr(SMask, S);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      Mask.append(2, X);
    }
    return Mask;
  }
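  // For example, getPclmulMask(4, /*OddElements=*/false) returns {0, 0, 2, 2}
  // and getPclmulMask(4, /*OddElements=*/true) returns {1, 1, 3, 3}: each
  // element actually consumed by pclmul has its shadow duplicated over its
  // ignored neighbor before the usual OR-combining.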
  // Instrument pclmul intrinsics.
  // These intrinsics operate either on odd or on even elements of the input
  // vectors, depending on the constant in the 3rd argument, ignoring the rest.
  // Replace the unused elements with copies of the used ones and then apply
  // the usual shadow/origin combining.
  void handlePclmulIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    SOC.Done(&I);
  }

  // Instrument @llvm.x86.sse41.round.sd / round.ss: the first element of the
  // result comes from the second operand, the rest from the first.
  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    // First element of second operand, remaining elements of first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleVtestIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *Or = IRB.CreateOr(Shadow0, Shadow1);
    Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
    Value *Scalar = convertShadowToScalar(NZ, IRB);
    Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    Value *OrShadow = IRB.CreateOr(First, Second);
    // First element of both operands OR'd together, remaining elements of the
    // first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // Instrument _mm_round_pd / _mm256_round_ps: the rounding-mode operand is a
  // constant integer, so only the vector operand contributes shadow.
  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    assert(isa<ConstantInt>(I.getArgOperand(1)));

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    SC.Done(&I);
  }

  // Instrument the abs intrinsic. Its last argument (is_int_min_poison) does
  // not match the result type, so the generic handlers cannot be used.
  void handleAbsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());
    assert(IsIntMinPoison->getType()->isIntegerTy(1));

    Value *SrcShadow = getShadow(Src);

    APInt MinVal =
        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
    Value *SrcIsMinVal = IRB.CreateICmpEQ(Src, MinValVec);

    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow =
        IRB.CreateSelect(SrcIsMinVal, PoisonedShadow, SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }
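  // For example, with @llvm.abs.i8(i8 %x, i1 true), %x == -128 overflows and
  // the result is poison, so the shadow of any element whose value equals
  // INT_MIN is forced to all-ones; with is_int_min_poison == false the operand
  // shadow passes through unchanged.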
  void handleIsFpClass(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));

    Value *Shadow = PoisonValue::get(getShadowTy(&I));
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // ...
  //   Value *Shadow = getShadow(V);
  // ...
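  // Illustration for handleArithmeticWithOverflow above: the result of, e.g.,
  // @llvm.sadd.with.overflow.i32 is an {i32, i1} pair, so the shadow is too:
  // field 0 gets the OR of both operand shadows, and the i1 overflow flag is
  // poisoned iff any bit of field 0 is poisoned.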
  // Instrument AVX512 masked down-converts, e.g.:
  //   <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, <16 x i8>, i8)
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->isIntegerTy());
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);

    // The mask has one bit per element of A; widen it to cover the (possibly
    // wider) output vector, with the padding bits zeroed.
    if (ANumElements != OutputNumElements) {
      // ...
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
                            "_ms_widen_mask");
    }

    Value *AShadow = getShadow(A);
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);

    // Truncate the per-element shadow to the narrower element type, then pad
    // with zeros if the output vector has more elements than A.
    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(
        IRB.CreateBitCast(
            Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements)),
        AShadow, WriteThroughShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // Instrument AVX512 intrinsics of the shape
  //   Out = Op(A, ..., WriteThru, ..., Mask, ...)
  // where every operand other than A and WriteThru must be fully initialized.
  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
                                         unsigned WriteThruIndex,
                                         unsigned MaskIndex) {
    IRBuilder<> IRB(&I);

    unsigned NumArgs = I.arg_size();
    assert(AIndex < NumArgs);
    assert(WriteThruIndex < NumArgs);
    assert(MaskIndex < NumArgs);
    assert(AIndex != WriteThruIndex);
    assert(AIndex != MaskIndex);
    assert(WriteThruIndex != MaskIndex);

    Value *A = I.getOperand(AIndex);
    Value *WriteThru = I.getOperand(WriteThruIndex);
    Value *Mask = I.getOperand(MaskIndex);

    assert(isFixedFPVector(A));
    assert(isFixedFPVector(WriteThru));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThru->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    for (unsigned i = 0; i < NumArgs; ++i) {
      if (i != AIndex && i != WriteThruIndex) {
        // All other operands (mask, rounding control, etc.) are integers that
        // must be fully initialized.
        assert(I.getOperand(i)->getType()->isIntegerTy());
        insertCheckShadowOf(I.getOperand(i), &I);
      }
    }

    // The mask is an i8 even when fewer than 8 elements are live.
    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);

    Value *AShadow = getShadow(A);
    Value *WriteThruShadow = getShadow(WriteThru);
    // Per-element select between A's shadow and the write-through shadow.
    Value *Shadow = IRB.CreateSelect(
        IRB.CreateBitCast(
            Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements)),
        AShadow, WriteThruShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // Instrument AVX512-FP16 scalar ("sh") operations with write-through and
  // mask: only element 0 is computed, the rest is copied from A.
  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);

    insertCheckShadowOf(Mask, &I);

    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    assert(WriteThrough->getType() == A->getType());
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);

    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);

    // Only the lowest mask bit matters for the scalar element.
    Value *MaskLower = IRB.CreateTrunc(Mask, IRB.getInt1Ty());

    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
        "_msprop");

    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }

  // Instrument the Galois field affine transformation
  //   GF2P8AFFINEQB: out = (x * A) ^ b  over GF(2), byte-wise.
  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *X = I.getOperand(0);
    Value *A = I.getOperand(1);
    Value *B = I.getOperand(2);

    assert(isFixedIntVector(X));
    assert(cast<FixedVectorType>(X->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);

    assert(A->getType() == X->getType());

    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);

    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    CallInst *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    CallInst *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {X, AShadow, BZeroShadow});
    CallInst *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {XShadow, A, BZeroShadow});

    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    unsigned NumElements =
        cast<FixedVectorType>(I.getType())->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }
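  // The three shadow invocations above approximate how uninitialized bits in
  // x and a propagate through the GF(2) matrix product: shadow-of-x with a,
  // x with shadow-of-a, and shadow with shadow. The scalar b only XORs each
  // result byte, so its shadow is broadcast across the vector and OR'd in.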
  /// Handle Arm NEON vector load intrinsics (vld*, and vld*lane when WithLane
  /// is set) by issuing the same load over shadow memory.
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();

    // Return type is a struct of vectors of integers or floating-point values.
    assert(I.getType()->isStructTy());

    IRBuilder<> IRB(&I);
    SmallVector<Value *, 6> ShadowArgs;

    if (WithLane) {
      assert(4 <= numArgs && numArgs <= 6);
      // Pass-through vectors: propagate their shadows.
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      // Lane number: passed verbatim, but must be initialized.
      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      ShadowArgs.push_back(LaneNumber);

      insertCheckShadowOf(LaneNumber, &I);
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore*/ false);
    ShadowArgs.push_back(SrcShadowPtr);

    CallInst *CI =
        IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, CI);

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }

  /// Handle Arm NEON vector store intrinsics (vst{2,3,4}, vst1x_{2,3,4} and,
  /// when useLane is set, vst{2,3,4}lane).
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    IRBuilder<> IRB(&I);

    // Don't use getNumOperands() because it includes the callee.
    int numArgOperands = I.arg_size();

    // The last arg operand is the output (pointer).
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    assert(Addr->getType()->isPointerTy());
    int skipTrailingOperands = 1;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // The second-to-last operand is the lane number (for vst{2,3,4}lane).
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      assert(isa<IntegerType>(
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
    }

    SmallVector<Value *, 8> ShadowArgs;
    // All the initial operands are the inputs.
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }

    // The total shadow written is one vector per input operand.
    FixedVectorType *OutputVectorTy = FixedVectorType::get(
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
        cast<FixedVectorType>(I.getArgOperand(0)->getType())
                ->getNumElements() *
            (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(
          1, I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // AArch64 NEON does not need alignment (unless the OS requires it).
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
    ShadowArgs.append(1, OutputShadowPtr);

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (MS.TrackOrigins) {
      // TODO: a more precise model of vst* could track per-input origins
      // instead of combining them all.
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }

  /// Approximation: apply the same (or a related) intrinsic to the shadows.
  /// The last trailingVerbatimArgs arguments are passed through unchanged,
  /// and their shadows are OR'd into the result afterwards.
  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID shadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    IRBuilder<> IRB(&I);

    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    // Don't use getNumOperands() because it includes the callee.
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);

      // Shadows are integer-ish types but some intrinsics require a
      // different (e.g., floating-point) type.
      ShadowArgs.push_back(
          IRB.CreateBitCast(Shadow, I.getArgOperand(i)->getType()));
    }

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Arg = I.getArgOperand(i);
      ShadowArgs.push_back(Arg);
    }

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // Combine the computed shadow with the shadows of the verbatim arguments.
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }

    setShadow(&I, IRB.CreateBitCast(CombinedShadow, getShadowTy(&I)));

    setOriginForNaryOp(I);
  }
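  // Example use of the helper above: for @llvm.aarch64.neon.tbl1, the table
  // operand's shadow is run through tbl1 itself, while the index operand is
  // passed verbatim (trailingVerbatimArgs == 1); the loop afterwards ORs the
  // index's own shadow into the combined result.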
  /// Handle Arm NEON vector multiply intrinsics by OR'ing the operand shadows.
  /// (Strictly, 0 * uninit == 0, so this is an over-approximation.)
  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    handleShadowOr(I);
  }

  // Handle intrinsics that are common to all platforms.
  // Returns true if the intrinsic was handled.
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/0);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    // Add/xor/mul reduction to scalar
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    // Signed/unsigned min/max reduction to scalar
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    // Floating-point min/max reduction to scalar
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, false);
      break;
    // Floating-point reductions with a starting value
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      handleShadowOr(I);
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }
  // Handle x86 SIMD intrinsics.
  // Returns true if the intrinsic was handled.
  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;

    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;

    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }

    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, false);
      break;

    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }

    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, true);
      break;

    // Packed shifts by scalar or immediate
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;
    // Packed shifts by per-element variable amounts
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /* Variable */ true);
      break;

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;

    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;

    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    // Multiply and Add Packed Words / Packed Signed and Unsigned Bytes
    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/0);
      break;

    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/8);
      break;

    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/16);
      break;

    // Dot products of bytes, 4-way reduction into 32-bit accumulators
    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssds_512:
    case Intrinsic::x86_avx2_vpdpbsud_128:
    case Intrinsic::x86_avx2_vpdpbsud_256:
    case Intrinsic::x86_avx10_vpdpbsud_512:
    case Intrinsic::x86_avx2_vpdpbsuds_128:
    case Intrinsic::x86_avx2_vpdpbsuds_256:
    case Intrinsic::x86_avx10_vpdpbsuds_512:
    case Intrinsic::x86_avx2_vpdpbuud_128:
    case Intrinsic::x86_avx2_vpdpbuud_256:
    case Intrinsic::x86_avx10_vpdpbuud_512:
    case Intrinsic::x86_avx2_vpdpbuuds_128:
    case Intrinsic::x86_avx2_vpdpbuuds_256:
    case Intrinsic::x86_avx10_vpdpbuuds_512:
      handleVectorPmaddIntrinsic(I, 4,
                                 /*EltSizeInBits=*/8);
      break;

    // Dot products of words, 2-way reduction into 32-bit accumulators
    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
    case Intrinsic::x86_avx2_vpdpwsud_128:
    case Intrinsic::x86_avx2_vpdpwsud_256:
    case Intrinsic::x86_avx10_vpdpwsud_512:
    case Intrinsic::x86_avx2_vpdpwsuds_128:
    case Intrinsic::x86_avx2_vpdpwsuds_256:
    case Intrinsic::x86_avx10_vpdpwsuds_512:
    case Intrinsic::x86_avx2_vpdpwusd_128:
    case Intrinsic::x86_avx2_vpdpwusd_256:
    case Intrinsic::x86_avx10_vpdpwusd_512:
    case Intrinsic::x86_avx2_vpdpwusds_128:
    case Intrinsic::x86_avx2_vpdpwusds_256:
    case Intrinsic::x86_avx10_vpdpwusds_512:
    case Intrinsic::x86_avx2_vpdpwuud_128:
    case Intrinsic::x86_avx2_vpdpwuud_256:
    case Intrinsic::x86_avx10_vpdpwuud_512:
    case Intrinsic::x86_avx2_vpdpwuuds_128:
    case Intrinsic::x86_avx2_vpdpwuuds_256:
    case Intrinsic::x86_avx10_vpdpwuuds_512:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/16);
      break;

    case Intrinsic::x86_avx512bf16_dpbf16ps_128:
    case Intrinsic::x86_avx512bf16_dpbf16ps_256:
    case Intrinsic::x86_avx512bf16_dpbf16ps_512:
      handleVectorPmaddIntrinsic(I, 2,
                                 /*EltSizeInBits=*/16);
      break;

    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;

    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;

    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;

    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;

    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;

    // Packed horizontal add/subtract of words
    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
      handlePairwiseShadowOrIntrinsic(I, 1, 16);
      break;
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_avx2_phsub_w:
      handlePairwiseShadowOrIntrinsic(I, 2, 16);
      break;

    // Packed horizontal add/subtract of doublewords
    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
      handlePairwiseShadowOrIntrinsic(I, 1, 32);
      break;
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_avx2_phsub_d:
      handlePairwiseShadowOrIntrinsic(I, 2, 32);
      break;

    // Packed horizontal add/subtract of signed words with saturation
    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
      handlePairwiseShadowOrIntrinsic(I, 1, 16);
      break;
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_avx2_phsub_sw:
      handlePairwiseShadowOrIntrinsic(I, 2, 16);
      break;

    // Packed horizontal add/subtract of floats/doubles
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256:
      handlePairwiseShadowOrIntrinsic(I, 2);
      break;

    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }

    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }

    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // These AVX512 variants contain the rounding mode as a trailing flag
      // operand.
      [[maybe_unused]] bool Success =
          maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }

    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }

    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;

    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;

    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      // Truncating down-converts: apply the intrinsic itself to the shadow.
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    // The saturating variants have no corresponding shadow semantics, so the
    // plain truncating pmov is applied to the shadow instead.
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_dw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_db_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qb_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      // These variants have no plain pmov counterpart, so the down-convert is
      // modeled directly.
      handleAVX512VectorDownConvert(I);
      break;
    }

    // Masked reciprocal square root: (a, writethru, mask)
    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1,
                                        /*MaskIndex=*/2);
      break;

    // Masked reciprocal: (a, writethru, mask)
    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1,
                                        /*MaskIndex=*/2);
      break;

    // Masked round-to-scale: (a, imm, writethru, mask)
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
    case Intrinsic::x86_avx512_mask_rndscale_ps_512:
    case Intrinsic::x86_avx512_mask_rndscale_ps_256:
    case Intrinsic::x86_avx512_mask_rndscale_ps_128:
    case Intrinsic::x86_avx512_mask_rndscale_pd_512:
    case Intrinsic::x86_avx512_mask_rndscale_pd_256:
    case Intrinsic::x86_avx512_mask_rndscale_pd_128:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 2,
                                        /*MaskIndex=*/3);
      break;

    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }

    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;

    default:
      return false;
    }
    return true;
  }
  // Handle Arm NEON SIMD intrinsics.
  // Returns true if the intrinsic was handled.
  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      // Not exactly a shift, but close enough.
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;

    // Floating-point pairwise min/max
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    // Integer pairwise min/max
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    // Pairwise add
    case Intrinsic::aarch64_neon_addp:
    // Floating-point pairwise add
    case Intrinsic::aarch64_neon_faddp:
    // Signed/unsigned add long pairwise
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    }

    // Floating-point to integer conversions
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    // Floating-point narrowing
    case Intrinsic::aarch64_neon_fcvtxn: {
      handleNEONVectorConvertIntrinsic(I);
      break;
    }

    // Add/max/min across a vector
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    // Signed/unsigned add long across a vector
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, true);
      break;

    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, /*WithLane=*/false);
      break;
    }

    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, /*WithLane=*/true);
      break;
    }

    // Saturating extract narrow
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      // ...
      break;

    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, false);
      break;
    }

    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, true);
      break;
    }

    // Arm NEON vector table lookups: apply the same intrinsic to the table
    // shadows, passing the index register verbatim.
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      handleIntrinsicByApplyingToShadow(
          I, I.getIntrinsicID(), /*trailingVerbatimArgs*/ 1);
      break;
    }

    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }

    default:
      return false;
    }
    return true;
  }

  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }
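  // Each maybeHandle* helper above returns false for intrinsic IDs it does not
  // recognize, so the chain ends at maybeHandleUnknownIntrinsic (heuristic
  // handling) and, failing that, at visitInstruction's conservative default.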
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have CB terminate the BB.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure
    // the shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }

  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure
    // the shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    // Atomic store always paints clean shadow/origin. See file header.
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }

  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Side effects not visible in the constraints are not
      // handled.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need special handling because there isn't a good
      // way to intercept them or compile the library with instrumentation.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);

      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction()) {
        Func->removeFnAttrs(B);
      }
      // ...
    }
    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so don't check them eagerly.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();

      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        [[maybe_unused]] Value *Store = nullptr;
        // Compute the shadow for arg even if it is ByVal, because in that case
        // getShadow() will copy the actual arg shadow to __msan_param_tls.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a single
          // load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              // ...
              unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment, OriginSize);
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of uninit
          // data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
      }
      assert(Size != 0);
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the return value.
    if (!CB.getType()->isSized())
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter), kShadowTLSAlignment,
        "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: Consider using SpecialCaseList to specify a list of functions that
    // must always return fully initialized values. For now, we hardcode "main".
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }
  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false, GlobalValue::PrivateLinkage,
                              IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    return createPrivateConstGlobalForString(*F.getParent(), I.getName());
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }

  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the corresponding
    // llvm.lifetime.start.
    AllocaSet.insert(&I);
  }

  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
      // an extra "select": Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else if (isScalableNonVectorType(I.getType())) {
      // This is intended to handle target("aarch64.svcount"), which can't be
      // handled in the else branch because of incompatible types.
      // ...
      Sa1 = getCleanShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (the condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned. If !Sb, simply pick one of Sc and Sd.

      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);

      // Result shadow if condition shadow is 1.
      Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
    // Nothing to do here.
  }

  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // For each assembly argument, we check its value for being initialized.
    // If the argument is a pointer, we assume it points to a single element
    // of the corresponding type (or to a 8-byte word, if the type is unsized).
    // Each such pointer is instrumented with a call to the runtime library.
    Type *OpType = Operand->getType();
    // Check the operand value itself.
    insertCheckShadowOf(Operand, &I);
    if (!OpType->isPointerTy() || !isOutput) {
      assert(!isOutput);
      return;
    }
    if (!ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy, derived from elementtype(), does not encode the alignment of
      // the pointer. Conservatively assume that the shadow memory is unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateMemSet(ShadowPtr, ConstantInt::getNullValue(IRB.getInt8Ty()),
                       SizeVal, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }

  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments.
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    const DataLayout &DL = F.getDataLayout();
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Doing so before unpoisoning output arguments, so
    // that we won't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, /*ElemTy=*/nullptr, I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual InlineAsm
    // call, so that the shadow for memory published in the asm() statement
    // remains valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
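  // visitInstruction is the catch-all with "strict semantics": every operand
  // is checked and the result is assumed fully initialized. That is exact for
  // instructions whose output depends on all input bits, and merely
  // conservative (it may over-report) for anything else.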
};

/// Common logic shared by the ABI-specific VarArgHelper implementations below.
struct VarArgHelperBase : public VarArgHelper {
protected:
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    return IRB.CreatePtrAdd(
        MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
  }

  /// Compute the shadow address for a given va_arg; return nullptr if it
  /// would overflow __msan_va_arg_tls.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset),
                            "_msarg_va_o");
  }

  /// The tail of __msan_va_arg_tls may not be large enough to fit a full
  /// value shadow, but it will be copied to the backup anyway; keep it clean.
  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
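// How the vararg pieces fit together: each ABI-specific visitCallBase writes
// the shadow of every variadic argument into __msan_va_arg_tls at the offset
// the ABI would assign it, and finalizeInstrumentation replays that snapshot
// over the real register-save/overflow areas right after each va_start.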
7442struct VarArgAMD64Helper :
public VarArgHelperBase {
7445 static const unsigned AMD64GpEndOffset = 48;
7446 static const unsigned AMD64FpEndOffsetSSE = 176;
7448 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
7450 unsigned AMD64FpEndOffset;
7451 AllocaInst *VAArgTLSCopy =
nullptr;
7452 AllocaInst *VAArgTLSOriginCopy =
nullptr;
7453 Value *VAArgOverflowSize =
nullptr;
7455 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
7457 VarArgAMD64Helper(Function &
F, MemorySanitizer &MS,
7458 MemorySanitizerVisitor &MSV)
7459 : VarArgHelperBase(
F, MS, MSV, 24) {
7460 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
7461 for (
const auto &Attr :
F.getAttributes().getFnAttrs()) {
7462 if (Attr.isStringAttribute() &&
7463 (Attr.getKindAsString() ==
"target-features")) {
7464 if (Attr.getValueAsString().contains(
"-sse"))
7465 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
7471 ArgKind classifyArgument(
Value *arg) {
7474 if (
T->isX86_FP80Ty())
7476 if (
T->isFPOrFPVectorTy())
7477 return AK_FloatingPoint;
7478 if (
T->isIntegerTy() &&
T->getPrimitiveSizeInBits() <= 64)
7479 return AK_GeneralPurpose;
7480 if (
T->isPointerTy())
7481 return AK_GeneralPurpose;
7493 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7494 unsigned GpOffset = 0;
7495 unsigned FpOffset = AMD64GpEndOffset;
7496 unsigned OverflowOffset = AMD64FpEndOffset;
7497 const DataLayout &
DL =
F.getDataLayout();
7501 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
7508 assert(
A->getType()->isPointerTy());
7510 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
7511 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7512 unsigned BaseOffset = OverflowOffset;
7513 Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7514 Value *OriginBase =
nullptr;
7515 if (MS.TrackOrigins)
7516 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7517 OverflowOffset += AlignedSize;
7520 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7524 Value *ShadowPtr, *OriginPtr;
7525 std::tie(ShadowPtr, OriginPtr) =
7530 if (MS.TrackOrigins)
7534 ArgKind AK = classifyArgument(
A);
7535 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
7537 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
7539 Value *ShadowBase, *OriginBase =
nullptr;
7541 case AK_GeneralPurpose:
7542 ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
7543 if (MS.TrackOrigins)
7544 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
7548 case AK_FloatingPoint:
7549 ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
7550 if (MS.TrackOrigins)
7551 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
7558 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
7559 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7560 unsigned BaseOffset = OverflowOffset;
7561 ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7562 if (MS.TrackOrigins) {
7563 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7565 OverflowOffset += AlignedSize;
7568 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7577 Value *Shadow = MSV.getShadow(
A);
7579 if (MS.TrackOrigins) {
7580 Value *Origin = MSV.getOrigin(
A);
7581 TypeSize StoreSize =
DL.getTypeStoreSize(Shadow->
getType());
7582 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
7588 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
7589 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr,
                         Alignment, VAArgOverflowSize);
      }
    }
  }
};
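// --- Illustrative sketch (not part of the source; compiles standalone,
// assuming <cstddef> for offsetof on an LP64 target): the System V x86-64
// va_list tag whose fields finalizeInstrumentation() above addresses by the
// raw offsets 8 (overflow_arg_area) and 16 (reg_save_area).
struct Amd64VaListTag {
  unsigned gp_offset;      // byte 0: next GP register slot
  unsigned fp_offset;      // byte 4: next FP register slot
  void *overflow_arg_area; // byte 8: stack-passed arguments
  void *reg_save_area;     // byte 16: spilled GP and FP registers
};
static_assert(sizeof(void *) != 8 ||
              offsetof(Amd64VaListTag, overflow_arg_area) == 8);
static_assert(sizeof(void *) != 8 ||
              offsetof(Amd64VaListTag, reg_save_area) == 16);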
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make the VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A rough approximation of the AArch64 argument classification rules.
  // The second pair member is the number of registers the argument consumes.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    return {AK_Memory, 0};
  }
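  // --- Illustrative note (not part of the source): the register count in the
  // pair's second element multiplies through composites. For example,
  // classifyArgument([4 x i32]) recurses to {AK_GeneralPurpose, 1} and scales
  // it to {AK_GeneralPurpose, 4}, and <4 x float> likewise yields
  // {AK_FloatingPoint, 4}; visitCallBase() below then charges 8 bytes per GR
  // and 16 bytes per VR against the save-area budgets.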
  // The instrumentation stores the argument shadow in a fixed layout: the
  // first kAArch64GrArgSize bytes of the va_arg TLS mirror the GR save area,
  // the next kAArch64VrArgSize bytes the VR save area, and the rest the
  // overflow arguments. Constant offsets allow a fast copy in
  // finalizeInstrumentation().
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory: {
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
        break;
      }
      }
      // Count GR/VR fixed arguments toward their offsets, but don't actually
      // store a shadow for them.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size, sign-extended to pointer width.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      // AAPCS64 saves incoming registers in two areas; __gr_offs/__vr_offs
      // are negative offsets from __gr_top/__vr_top to the first unnamed
      // slot, so the sign-extended 32-bit fields are added to the top
      // pointers.
      Type *RegSaveAreaPtrTy = IRB.getPtrTy();
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // Copy the shadow of the unnamed general-purpose registers.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, for the FP/SIMD save area.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally for the remaining arguments in the stack area.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
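// --- Illustrative sketch (not part of the source; compiles standalone on an
// LP64 target): the AAPCS64 va_list that getVAField64()/getVAField32() above
// index into. __gr_offs and __vr_offs hold negative offsets from __gr_top and
// __vr_top, which is why the code sign-extends the 32-bit fields before
// adding them to the 64-bit top pointers.
struct Aarch64VaList {
  void *__stack;  // byte 0: next stack-passed argument
  void *__gr_top; // byte 8: end of the GP register save area
  void *__vr_top; // byte 16: end of the FP/SIMD register save area
  int __gr_offs;  // byte 24: negative offset to the next GP slot
  int __vr_offs;  // byte 28: negative offset to the next VR slot
};
static_assert(sizeof(void *) != 8 || sizeof(Aarch64VaList) == 32,
              "matches the VAListTagSize of 32 passed to VarArgHelperBase");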
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // For PowerPC, we need to deal with alignment of stack arguments - they
    // are mostly aligned to 8 bytes, but vectors and i128 arrays are aligned
    // to 16 bytes, and byvals can be aligned to 8 or 16 bytes. Compute the
    // current offset from the stack pointer (which is always properly
    // aligned) and the offset of the first vararg, then subtract them.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    // The parameter save area starts at 48 bytes from the frame pointer for
    // ABIv1, and at 32 bytes for ABIv2.
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow of arguments smaller than 8 bytes to match the
          // placement of bits on a big-endian system.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
/// PowerPC32-specific implementation of VarArgHelper.
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase = 0;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          // Arrays are aligned to the element size, except for long double
          // arrays, which keep the default alignment.
          Type *ElementTy = ArgTy->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow of arguments smaller than IntptrSize to match
          // the placement of bits on a big-endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;

      // The register save area holds eight 4-byte GPR slots, so at most 32
      // bytes of shadow are copied there; the rest goes to the overflow area.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);

      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);

      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);

      // Unpoison the 32 bytes following the copied GPR slots.
      RegSaveAreaShadowPtr = IRB.CreatePtrAdd(
          RegSaveAreaShadowPtr, ConstantInt::get(MS.IntptrTy, 32));
      IRB.CreateMemSet(RegSaveAreaShadowPtr, IRB.getInt8(0),
                       ConstantInt::get(MS.IntptrTy, 32), Alignment);

      // Copy the remaining shadow to the overflow area, whose pointer lives
      // at byte 4 of the va_list tag.
      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);

      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);

      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
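// --- Illustrative sketch (not part of the source; compiles standalone on a
// 32-bit target): the PowerPC32 va_list addressed above. The overflow-area
// pointer at byte 4 and the register-save-area pointer at byte 8 explain the
// two CreateAdd offsets, and 8 GPR slots of 4 bytes each explain the
// umin(CopySize, 32) clamp on the register-save copy.
struct Ppc32VaList {
  unsigned char gpr;   // byte 0: index of the next general register
  unsigned char fpr;   // byte 1: index of the next floating-point register
  unsigned short pad;  // bytes 2..3
  void *overflow_area; // byte 4: stack-passed arguments
  void *reg_save_area; // byte 8: spilled registers
};
static_assert(sizeof(void *) != 4 || sizeof(Ppc32VaList) == 12,
              "matches the VAListTagSize of 12 passed to VarArgHelperBase");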
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(
            F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  ArgKind classifyArgument(Type *T) {
    // i128 and fp128 are passed indirectly.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // Integer arguments narrower than 64 bits are widened to a full register
    // with sign or zero extension; the shadow must follow the same rule.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt))
      return ShadowExtension::Zero;
    if (CB.paramHasAttr(ArgNo, Attribute::SExt))
      return ShadowExtension::Sign;
    return ShadowExtension::None;
  }
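  // --- Illustrative note (not part of the source): for a callee like
  // `void f(short x, ...)` where `x` carries the signext attribute, the
  // backend sign-extends the value to a full 64-bit register, so the shadow
  // is widened identically. ShadowExtension::None keeps the original width
  // and leaves a gap in the slot, whose size visitCallBase() below accounts
  // for via GapSize.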
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        // A short floating-point datum occupies only the leftmost bits of a
        // floating-point register, so the shadow is not extended here.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex; vector varargs go through the overflow area.
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset, but store shadow only for varargs.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions, it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // Make a backup copy of va_arg_tls in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }
    // Instrument va_start: copy the va_list shadow from the TLS backup.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
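// --- Illustrative sketch (not part of the source; compiles standalone on an
// LP64 s390x-style target): the SystemZ va_list behind the constants above.
// SystemZOverflowArgAreaPtrOffset (16) and SystemZRegSaveAreaPtrOffset (24)
// are simply the byte offsets of the last two fields.
struct S390xVaList {
  long __gpr;                // byte 0: count of GP registers consumed
  long __fpr;                // byte 8: count of FP registers consumed
  void *__overflow_arg_area; // byte 16
  void *__reg_save_area;     // byte 24
};
static_assert(sizeof(long) != 8 || sizeof(S390xVaList) == 32,
              "matches SystemZVAListTagSize");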
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow of arguments smaller than IntptrSize to match
          // the placement of bits on a big-endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS holds the total size of all varargs here.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
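// --- Illustrative note (not part of the source): on i386 a va_list is just a
// char* cursor into the caller's argument stack, so the helper above treats
// the whole variadic area as one linear region: visitCallBase() lays the
// shadow out densely in IntptrSize-aligned slots, and the va_start
// instrumentation copies the entire TLS backup to the shadow of that region
// in a single memcpy.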
/// Implementation of VarArgHelper shared by ARM32, MIPS, RISC-V and
/// LoongArch64.
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjust the shadow of arguments smaller than IntptrSize to match
        // the placement of bits on a big-endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS holds the total size of all varargs here.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
// ARM32, RISC-V, MIPS and LoongArch64 share the same generic va_arg handling.
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
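// --- Illustrative note (not part of the source): adding a target is one more
// triple check ahead of the fallback, e.g. (hypothetical):
//
//   if (TargetTriple.getArch() == Triple::sparcv9)
//     return new VarArgGenericHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
//
// Triples without a helper get VarArgNoOpHelper, which does no va_arg shadow
// bookkeeping at all, so values read through va_arg on such targets are not
// tracked precisely and checks on them may be wrong in either direction.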
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out the function's memory attributes before instrumenting, then
  // run the visitor over the function body.
  return Visitor.runOnFunction();
}