#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"

DEBUG_COUNTER(InsertCheckCounter, "msan-insert-checks",
              "Controls which checks to insert");
DEBUG_COUNTER(InstrumentInstructionCounter, "msan-instrument-instruction",
              "Controls which instruction to instrument");
241 "msan-track-origins",
246 cl::desc(
"keep going after reporting a UMR"),
255 "msan-poison-stack-with-call",
260 "msan-poison-stack-pattern",
261 cl::desc(
"poison uninitialized stack variables with the given pattern"),
266 cl::desc(
"Print name of local stack variable"),
271 cl::desc(
"Poison fully undef temporary values. "
272 "Partially undefined constant vectors "
273 "are unaffected by this flag (see "
274 "-msan-poison-undef-vectors)."),
278 "msan-poison-undef-vectors",
279 cl::desc(
"Precisely poison partially undefined constant vectors. "
280 "If false (legacy behavior), the entire vector is "
281 "considered fully initialized, which may lead to false "
282 "negatives. Fully undefined constant vectors are "
283 "unaffected by this flag (see -msan-poison-undef)."),
287 "msan-precise-disjoint-or",
288 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
289 "disjointedness is ignored (i.e., 1|1 is initialized)."),
294 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
299 cl::desc(
"exact handling of relational integer ICmp"),
303 "msan-handle-lifetime-intrinsics",
305 "when possible, poison scoped variables at the beginning of the scope "
306 "(slower, but more precise)"),
317 "msan-handle-asm-conservative",
328 "msan-check-access-address",
329 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
334 cl::desc(
"check arguments and return values at function call boundaries"),
338 "msan-dump-strict-instructions",
339 cl::desc(
"print out instructions with default strict semantics i.e.,"
340 "check that all the inputs are fully initialized, and mark "
341 "the output as fully initialized. These semantics are applied "
342 "to instructions that could not be handled explicitly nor "
351 "msan-dump-heuristic-instructions",
352 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
353 "Use -msan-dump-strict-instructions to print instructions that "
354 "could not be handled explicitly nor heuristically."),
358 "msan-instrumentation-with-call-threshold",
360 "If the function being instrumented requires more than "
361 "this number of checks and origin stores, use callbacks instead of "
362 "inline checks (-1 means never use callbacks)."),
367 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
377 cl::desc(
"Insert checks for constant shadow values"),
384 cl::desc(
"Place MSan constructors in comdat sections"),
390 cl::desc(
"Define custom MSan AndMask"),
394 cl::desc(
"Define custom MSan XorMask"),
398 cl::desc(
"Define custom MSan ShadowBase"),
402 cl::desc(
"Define custom MSan OriginBase"),
407 cl::desc(
"Define threshold for number of checks per "
408 "debug location to force origin update."),
// Memory map parameters used in application-to-shadow address calculation.
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
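// Illustration only (hypothetical host-side helpers, not part of MSan): the
// fields above translate an application address into its shadow and origin
// addresses, matching getShadowPtrOffset()/getShadowOriginPtrUserspace()
// further below:
//
//   uint64_t appToShadow(const MemoryMapParams &P, uint64_t Addr) {
//     return ((Addr & ~P.AndMask) ^ P.XorMask) + P.ShadowBase;
//   }
//   uint64_t appToOrigin(const MemoryMapParams &P, uint64_t Addr) {
//     // Origins are kept at 4-byte granularity; callers mask the low bits.
//     return ((Addr & ~P.AndMask) ^ P.XorMask) + P.OriginBase;
//   }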
class MemorySanitizer {
public:
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

private:
  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args);

  Value *ParamOriginTLS;
  Value *RetvalOriginTLS;
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;
  FunctionCallee MaybeWarningVarSizeFn;
  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
  FunctionCallee MsanPoisonStackFn;
  FunctionCallee MsanChainOriginFn;
  FunctionCallee MsanSetOriginFn;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;

  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;
  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;

  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;
  MemoryMapParams CustomMapParams;

  MDNode *ColdCallWeights;
  MDNode *OriginStoreWeights;
};
void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kMsanModuleCtorName, kMsanInitName,
      /*InitArgTypes=*/{}, /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case.
      [&](Function *Ctor, FunctionCallee) {
        if (!ClWithComdat) {
          appendToGlobalCtors(M, Ctor, 0);
          return;
        }
        Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
        Ctor->setComdat(MsanCtorComdat);
        appendToGlobalCtors(M, Ctor, 0, Ctor);
      });
}

PreservedAnalyses MemorySanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  bool Modified = false;
  if (!Options.Kernel) {
    insertModuleCtor(M);
    Modified = true;
  }

  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    if (F.empty())
      continue;
    MemorySanitizer Msan(*F.getParent(), Options);
    Modified |=
        Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F));
  }

  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // Sanitizers change the IR in ways that invalidate GlobalsAA.
  PA.abandon<GlobalsAA>();
  return PA;
}

void MemorySanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<MemorySanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.Recover)
    OS << "recover;";
  if (Options.Kernel)
    OS << "kernel;";
  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;
  OS << '>';
}
template <typename... ArgsTy>
FunctionCallee
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args) {
  if (TargetTriple.getArch() == Triple::systemz) {
    // SystemZ ABI: the shadow/origin pair is returned via a hidden parameter.
    return M.getOrInsertFunction(Name, Type::getVoidTy(*C), PtrTy,
                                 std::forward<ArgsTy>(Args)...);
  }

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
}
void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
  IRBuilder<> IRB(*C);

  // These will be initialized in insertKmsanPrologue().
  RetvalOriginTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    TLI.getAttrList(C, {0}, /*Signed=*/false),
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  // Requests the per-task context state from the kernel runtime.
  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  // Kernel metadata accessors for fixed access sizes 1, 2, 4 and 8.
  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
  }

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);
  // Functions for poisoning/unpoisoning local variables.
  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
}

static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
  return M.getOrInsertGlobal(Name, Ty, [&] {
    return new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                              nullptr, Name, nullptr,
                              GlobalVariable::InitialExecTLSModel);
  });
}
/// Insert declarations for userspace-specific functions and globals.
void MemorySanitizer::createUserspaceApi(Module &M,
                                         const TargetLibraryInfo &TLI) {
  IRBuilder<> IRB(*C);

  // Create the warning callback.
  if (TrackOrigins) {
    StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                      : "__msan_warning_with_origin_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName,
                                      TLI.getAttrList(C, {0}, /*Signed=*/false),
                                      IRB.getVoidTy(), IRB.getInt32Ty());
  } else {
    StringRef WarningFnName =
        Recover ? "__msan_warning" : "__msan_warning_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
  }

  // Create the global TLS variables. (The other *TLS globals are elided.)
  VAArgOverflowSizeTLS =
      getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls",
                        IRB.getIntPtrTy(M.getDataLayout()));

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, TLI.getAttrList(C, {0, 1}, /*Signed=*/false),
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, /*Signed=*/false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, TLI.getAttrList(C, {0, 2}, /*Signed=*/false),
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
        IRB.getInt32Ty());
  }

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
}
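// Reader-facing sketch of the runtime contract (assumed C prototypes; the
// authoritative declarations live in compiler-rt):
//
//   void __msan_warning_noreturn(void);
//   void __msan_maybe_warning_4(uint32_t shadow, uint32_t origin);
//   void __msan_poison_stack(void *addr, uintptr_t size);
//
// Each getOrInsertFunction() call above must stay in sync with the runtime's
// actual signatures.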
/// Insert extern declarations of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M,
                                          const TargetLibraryInfo &TLI) {
  // Only do this once.
  if (CallbacksInitialized)
    return;

  IRBuilder<> IRB(*C);
  // Initialize callbacks that are common for kernel and userspace
  // instrumentation.
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin",
      TLI.getAttrList(C, {0}, /*Signed=*/false, /*Ret=*/true),
      IRB.getInt32Ty(), IRB.getInt32Ty());
  MsanSetOriginFn = M.getOrInsertFunction(
      "__msan_set_origin", TLI.getAttrList(C, {2}, /*Signed=*/false),
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
  MemmoveFn =
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset",
                                   TLI.getAttrList(C, {1}, /*Signed=*/true),
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

  if (CompileKernel) {
    createKernelApi(M, TLI);
  } else {
    createUserspaceApi(M, TLI);
  }
  CallbacksInitialized = true;
}

FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
                                                             int size) {
  FunctionCallee *Fns =
      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
  switch (size) {
  case 1:
    return Fns[0];
  case 2:
    return Fns[1];
  case 4:
    return Fns[2];
  case 8:
    return Fns[3];
  default:
    return nullptr;
  }
}
/// Module-level initialization.
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  // Check the command-line overrides first.
  if (ShadowPassed || OriginPassed) {
    CustomMapParams.AndMask = ClAndMask;
    CustomMapParams.XorMask = ClXorMask;
    CustomMapParams.ShadowBase = ClShadowBase;
    CustomMapParams.OriginBase = ClOriginBase;
    MapParams = &CustomMapParams;
  } else {
    // Otherwise pick the platform defaults, keyed on OS, then architecture.
    switch (TargetTriple.getOS()) {
    case Triple::FreeBSD:
      switch (TargetTriple.getArch()) {
        // ... per-arch FreeBSD MemoryMapParams ...
      }
      break;
    case Triple::NetBSD:
      switch (TargetTriple.getArch()) {
        // ... per-arch NetBSD MemoryMapParams ...
      }
      break;
    case Triple::Linux:
      switch (TargetTriple.getArch()) {
        // ... per-arch Linux MemoryMapParams ...
      }
      break;
    default:
      report_fatal_error("unsupported operating system");
    }
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();
  if (!CompileKernel) {
    if (TrackOrigins)
      M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(
            M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
            IRB.getInt32(TrackOrigins), "__msan_track_origins");
      });

    if (Recover)
      M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                  GlobalValue::WeakODRLinkage,
                                  IRB.getInt32(Recover), "__msan_keep_going");
      });
  }
}
/// A helper class that handles instrumentation of VarArg functions.
/// Implementations are expected to insert the instrumentation necessary to
/// propagate argument shadow through VarArg function calls.
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  /// Visit a CallBase.
  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;

  /// Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;
};

struct MemorySanitizerVisitor;

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor);

static unsigned TypeSizeToSizeIndex(TypeSize TS) {
  if (TS.isScalable())
    // Scalable types unconditionally take the slow path.
    return kNumberOfAccessSizes;
  unsigned TypeSizeFixed = TS.getFixedValue();
  if (TypeSizeFixed <= 8)
    return 0;
  return Log2_32_Ceil((TypeSizeFixed + 7) / 8);
}

namespace {

/// Helper class to attach debug information of the given instruction onto new
/// instructions inserted after.
class NextNodeIRBuilder : public IRBuilder<> {
public:
  explicit NextNodeIRBuilder(Instruction *IP) : IRBuilder<>(IP->getNextNode()) {
    SetCurrentDebugLocation(IP->getDebugLoc());
  }
};
/// This class does all the work for a given function.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes;
  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;
  Instruction *FnPrologueEnd;
  SmallVector<Instruction *, 16> Instructions;

  // The following flags disable parts of MSan instrumentation based on
  // exclusion list contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;

    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallVector<StoreInst *, 16> StoreList;
  SmallSetVector<AllocaInst *, 16> AllocaSet;
  SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
  bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
  int64_t SplittableBlocksCount = 0;
  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    PoisonUndefVectors = SanitizeFunction && ClPoisonUndefVectors;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since we would not calculate the
    // shadow for the incoming value, we have to remove such Phi nodes.
    removeUnreachableBlocks(F);

    MS.initializeCallbacks(*F.getParent(), TLI);
    FnPrologueEnd =
        IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
            .CreateIntrinsic(Intrinsic::donothing, {});

    if (MS.CompileKernel) {
      IRBuilder<> IRB(FnPrologueEnd);
      insertKmsanPrologue(IRB);
    }

    LLVM_DEBUG(if (!InsertChecks) dbgs()
               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");
  }
  bool instrumentWithCalls(Value *V) {
    // Constants likely will be eliminated by follow-up passes.
    if (isa<Constant>(V))
      return false;

    ++SplittableBlocksCount;
    return ClInstrumentationWithCallThreshold >= 0 &&
           SplittableBlocksCount > ClInstrumentationWithCallThreshold;
  }

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&
           (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
  }

  // Creates a new origin and records the stack trace. In general we can call
  // this function for any origin manipulation we like. However it will cost
  // runtime resources. So use this carefully.
  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1)
      return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    if (IntptrSize == kOriginSize)
      return Origin;
    assert(IntptrSize == kOriginSize * 2);
    Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /*isSigned=*/false);
    return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
  }
  /// Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    assert(IntptrAlignment >= kMinOriginAlignment);
    assert(IntptrSize >= kOriginSize);

    if (TS.isScalable()) {
      // For scalable types, emit a runtime loop over the origin slots.
      Value *Size = IRB.CreateTypeSize(MS.IntptrTy, TS);
      auto [InsertPt, Index] =
          SplitBlockAndInsertSimpleForLoop(Size, &*IRB.GetInsertPoint());
      IRB.SetInsertPoint(InsertPt);
      // ... store Origin at OriginPtr[Index] ...
      return;
    }

    unsigned Size = TS.getFixedValue();

    unsigned Ofs = 0;
    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      // Write pointer-sized words holding IntptrSize / kOriginSize origin
      // slots each, while alignment permits.
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr = IRB.CreatePointerCast(OriginPtr, MS.PtrTy);
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
                       : IntptrOriginPtr;
        IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
        Ofs += IntptrSize / kOriginSize;
        CurrentAlignment = IntptrAlignment;
      }
    }
    // ... store the remaining origins one kOriginSize slot at a time ...
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
    // ZExt cannot convert between vector and scalar.
    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
    if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
      if (!ClCheckConstantShadow || ConstantShadow->isZeroValue())
        // Origin is not needed: value is initialized or const shadow is
        // ignored.
        return;
      if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
        // Copy origin as the value is definitely uninitialized.
        paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
                    OriginAlignment);
        return;
      }
      // Fall back to a runtime check, which still can be optimized out later.
    }

    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (instrumentWithCalls(ConvertedShadow) &&
        SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
      CB->addParamAttr(0, Attribute::ZExt);
      CB->addParamAttr(2, Attribute::ZExt);
    } else {
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
      IRBuilder<> IRBNew(CheckTerm);
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
                  OriginAlignment);
    }
  }
  void materializeStores() {
    for (StoreInst *SI : StoreList) {
      IRBuilder<> IRB(SI);
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      Type *ShadowTy = Shadow->getType();
      const Align Alignment = SI->getAlign();
      const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);

      [[maybe_unused]] StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
      LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");

      if (SI->isAtomic())
        SI->setOrdering(addReleaseOrdering(SI->getOrdering()));

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
                    OriginAlignment);
    }
  }

  // Returns true if the debug location corresponds to multiple warnings.
  bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
    if (MS.TrackOrigins < 2)
      return false;

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

    return LazyWarningDebugLocationCount[DebugLoc] >= ClDisambiguateWarning;
  }

  /// Helper function to insert a warning at IRB's current insert point.
  void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
    if (!Origin)
      Origin = (Value *)IRB.getInt32(0);
    assert(Origin->getType()->isIntegerTy());

    if (shouldDisambiguateWarningLocation(IRB.getCurrentDebugLocation())) {
      // Chain a fresh origin at the warning site, so that multiple warnings
      // sharing one debug location remain distinguishable.
      if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
        auto NewDebugLoc = OI->getDebugLoc();
        IRBuilder<> IRBOrigin(&*IRB.GetInsertPoint());
        IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
        Origin = updateOrigin(Origin, IRBOrigin);
      }
    }

    if (MS.CompileKernel || MS.TrackOrigins)
      IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
    else
      IRB.CreateCall(MS.WarningFn)->setCannotMerge();
    // FIXME: Insert UnreachableInst if !MS.Recover?
  }
  void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
                           Value *Origin) {
    const DataLayout &DL = F.getDataLayout();
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
      // ZExt cannot convert between vector and scalar.
      ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
      if (SizeIndex < kNumberOfAccessSizes) {
        FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        CallBase *CB = IRB.CreateCall(
            Fn, {ConvertedShadow2,
                 MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
        CB->addParamAttr(0, Attribute::ZExt);
        CB->addParamAttr(1, Attribute::ZExt);
      } else {
        // The shadow is too large for the fixed-size callbacks: spill it and
        // pass a (pointer, size) pair instead.
        FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
        Value *ConvertedShadow2 = ConvertedShadow;
        Value *ShadowAlloca = IRB.CreateAlloca(ConvertedShadow2->getType());
        IRB.CreateStore(ConvertedShadow2, ShadowAlloca);
        unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());
        IRB.CreateCall(
            Fn,
            {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
      }
    } else {
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, &*IRB.GetInsertPoint(),
          /* Unreachable */ !MS.Recover, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      insertWarningFn(IRB, Origin);
      LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeInstructionChecks(
      ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
    const DataLayout &DL = F.getDataLayout();
    // Disable combining in some cases. TrackOrigins checks each shadow to
    // pick the correct origin.
    bool Combine = !MS.TrackOrigins;
    Instruction *Instruction = InstructionChecks.front().OrigIns;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);
      IRBuilder<> IRB(Instruction);

      Value *ConvertedShadow = ShadowData.Shadow;

      if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
        if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
          // Skip, value is initialized or const shadow is ignored.
          continue;
        }
        if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
          // Report, as the value is definitely uninitialized.
          insertWarningFn(IRB, ShadowData.Origin);
          if (!MS.Recover)
            return; // Always fail and stop here; no need to check the rest.
          continue;
        }
        // Fall back to a runtime check, which can still be optimized out.
      }

      if (!Combine) {
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        continue;
      }

      if (!Shadow) {
        Shadow = ConvertedShadow;
        continue;
      }

      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
    }

    if (Shadow) {
      assert(Combine);
      IRBuilder<> IRB(Instruction);
      materializeOneCheck(IRB, Shadow, nullptr);
    }
  }
  static bool isAArch64SVCount(Type *Ty) {
    if (TargetExtType *TTy = dyn_cast<TargetExtType>(Ty))
      return TTy->getName() == "aarch64.svcount";
    return false;
  }

  // e.g., AArch64 SVE counters, which are scalable but not vectors.
  static bool isScalableNonVectorType(Type *Ty) {
    if (!isAArch64SVCount(Ty))
      LLVM_DEBUG(dbgs() << "isScalableNonVectorType: Unexpected type " << *Ty
                        << "\n");

    return Ty->isScalableTy() && !isa<ScalableVectorType>(Ty);
  }

  void materializeChecks() {
#ifndef NDEBUG
    // For the assert below.
    SmallPtrSet<Instruction *, 16> Done;
#endif

    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      // Checks are grouped by the original instruction. We process all
      // checks of an instruction at once.
      assert(Done.insert(OrigIns).second);
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;
                            });
      materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
      I = J;
    }

    LLVM_DEBUG(dbgs() << "DONE:\n" << F);
  }
  void insertKmsanPrologue(IRBuilder<> &IRB) {
    Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
    Constant *Zero = IRB.getInt32(0);
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");
    if (MS.TargetTriple.getArch() == Triple::systemz)
      MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
  }
  bool runOnFunction() {
    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable). For PHI nodes we create dummy
    // shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
      visit(*BB);

    // `visit` above only collects instructions. Process them after iterating
    // the CFG to avoid a dependency on CFG transformations.
    for (Instruction *I : Instructions)
      InstVisitor<MemorySanitizerVisitor>::visit(*I);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Poison llvm.lifetime.start intrinsics, if we haven't fallen back to
    // instrumenting only allocas.
    if (InstrumentLifetimeStart) {
      for (auto Item : LifetimeStartList) {
        instrumentAlloca(*Item.second, Item.first);
        AllocaSet.remove(Item.second);
      }
    }
    // Poison the allocas for which we didn't instrument the corresponding
    // lifetime intrinsics.
    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    // Insert shadow value checks.
    materializeChecks();

    // Delayed instrumentation of StoreInst.
    // This may not add new address checks.
    materializeStores();

    return true;
  }
  /// Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

  /// Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized())
      return nullptr;
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    const DataLayout &DL = F.getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getElementCount());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type *, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    if (isScalableNonVectorType(OrigTy)) {
      LLVM_DEBUG(dbgs() << "getShadowTy: Scalable non-vector type: " << *OrigTy
                        << "\n");
      return OrigTy;
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  /// Collapse a struct shadow to a scalar "sum" (OR) of its elements.
  Value *collapseStructShadow(StructType *Struct, Value *Shadow,
                              IRBuilder<> &IRB) {
    Value *FalseVal = IRB.getIntN(/* width */ 1, 0);
    Value *Aggregator = FalseVal;

    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
      // Combine by ORing together each element's bool shadow.
      Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
      Value *ShadowBool = convertToBool(ShadowItem, IRB);

      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
      else
        Aggregator = ShadowBool;
    }

    return Aggregator;
  }

  // Extract the combined shadow of all array elements.
  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
                             IRBuilder<> &IRB) {
    if (!Array->getNumElements())
      return IRB.getIntN(/* width */ 1, 0);

    Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);

    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
      Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
    }
    return Aggregator;
  }

  /// Convert a shadow value to its flattened variant. The resulting shadow
  /// may not necessarily have the same bit width as the input value, but it
  /// will always be comparable to zero.
  Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
    if (StructType *Struct = dyn_cast<StructType>(V->getType()))
      return collapseStructShadow(Struct, V, IRB);
    if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
      return collapseArrayShadow(Array, V, IRB);
    if (isa<VectorType>(V->getType())) {
      if (isa<ScalableVectorType>(V->getType()))
        return convertShadowToScalar(IRB.CreateOrReduce(V), IRB);
      unsigned BitWidth =
          V->getType()->getPrimitiveSizeInBits().getFixedValue();
      return IRB.CreateBitCast(V, IntegerType::get(*MS.C, BitWidth));
    }
    return V;
  }

  // Convert a scalar value to an i1 by comparing with 0.
  Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
    Type *VTy = V->getType();
    if (!VTy->isIntegerTy())
      return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
    if (VTy->getIntegerBitWidth() == 1)
      // Just converting a bool to a bool, so do nothing.
      return V;
    return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), name);
  }
  Type *ptrToIntPtrType(Type *PtrTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());
    }
    assert(PtrTy->isIntOrPtrTy());
    return MS.IntptrTy;
  }

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    }
    assert(IntPtrTy == MS.IntptrTy);
    return MS.PtrTy;
  }

  Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return ConstantVector::getSplat(
          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    }
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C, /*IsSigned=*/false);
  }
  /// Compute the integer shadow offset that corresponds to a given
  /// application address.
  ///
  /// Offset = (Addr & ~AndMask) ^ XorMask
  /// Addr can be a ptr or <N x ptr>.
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));

    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
    return OffsetLong;
  }
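  // Worked example (Linux x86_64 layout, where AndMask == 0 and
  // XorMask == 0x500000000000): an application address A maps to shadow at
  // A ^ 0x500000000000, and getShadowOriginPtrUserspace() below adds
  // OriginBase to the same offset for the origin slot. The constants here
  // illustrate one platform configuration and are not normative.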
  /// Compute the shadow and origin addresses corresponding to a given
  /// application address.
  ///
  /// Shadow = ShadowBase + Offset
  /// Origin = (OriginBase + Offset) & ~(kMinOriginAlignment - 1)
  std::pair<Value *, Value *>
  getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
                              MaybeAlign Alignment) {
    VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
    if (!VectTy) {
      assert(Addr->getType()->isPointerTy());
    } else {
      assert(VectTy->getElementType()->isPointerTy());
    }
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
      ShadowLong =
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
    }
    Value *ShadowPtr = IRB.CreateIntToPtr(
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
        OriginLong =
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
      if (!Alignment || *Alignment < kMinOriginAlignment) {
        uint64_t Mask = kMinOriginAlignment.value() - 1;
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
      }
      OriginPtr = IRB.CreateIntToPtr(
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    }
    return std::make_pair(ShadowPtr, OriginPtr);
  }
  template <typename... ArgsTy>
  Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
                            ArgsTy... Args) {
    if (MS.TargetTriple.getArch() == Triple::systemz) {
      IRB.CreateCall(Callee,
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
    }

    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  }

  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
                                                            IRBuilder<> &IRB,
                                                            Type *ShadowTy,
                                                            bool isStore) {
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);

    FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
    Value *AddrCast = IRB.CreatePointerCast(Addr, MS.PtrTy);
    if (Getter) {
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
    } else {
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          IRB,
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
          AddrCast, SizeVal);
    }
    Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
    Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);

    return std::make_pair(ShadowPtr, OriginPtr);
  }
  /// Addr has to be a memory address, not a register.
  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
                                                       IRBuilder<> &IRB,
                                                       Type *ShadowTy,
                                                       bool isStore) {
    VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
    if (!VectTy) {
      assert(Addr->getType()->isPointerTy());
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);
    }

    // The KMSAN callbacks take a single address, so extract each element,
    // query it separately, and reassemble vectors of pointers.
    unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
    Value *ShadowPtrs = ConstantInt::getNullValue(
        FixedVectorType::get(MS.PtrTy, NumElements));
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
          FixedVectorType::get(MS.PtrTy, NumElements));
    for (unsigned i = 0; i < NumElements; ++i) {
      Value *OneAddr = IRB.CreateExtractElement(
          Addr, ConstantInt::get(IRB.getInt32Ty(), i));
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);

      ShadowPtrs = IRB.CreateInsertElement(
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
        OriginPtrs = IRB.CreateInsertElement(
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    }
    return {ShadowPtrs, OriginPtrs};
  }

  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 Type *ShadowTy,
                                                 MaybeAlign Alignment,
                                                 bool isStore) {
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
  }

  /// Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS + ArgOffset.
  Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
    return IRB.CreatePtrAdd(MS.ParamTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");
  }

  /// Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
    if (!MS.TrackOrigins)
      return nullptr;
    return IRB.CreatePtrAdd(MS.ParamOriginTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset),
                            "_msarg_o");
  }

  /// Compute the origin address for a retval.
  Value *getOriginPtrForRetval() {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
  /// Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins)
      return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  Constant *getCleanShadow(Type *OrigTy) {
    Type *ShadowTy = getShadowTy(OrigTy);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

  /// Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    llvm_unreachable("Unexpected shadow type");
  }

  /// Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }
  /// Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS for function arguments.
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanShadow(V);
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);
      LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value *&ShadowPtr = ShadowMap[V];
      if (ShadowPtr)
        return ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(FnPrologueEnd);
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
          LLVM_DEBUG(dbgs() << (FArg.getType()->isScalableTy()
                                    ? "vscale not fully supported\n"
                                    : "Arg is not sized\n"));
          if (A == &FArg) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            break;
          }
          continue;
        }

        unsigned Size = FArg.hasByValAttr()
                            ? DL.getTypeAllocSize(FArg.getParamByValType())
                            : DL.getTypeAllocSize(FArg.getType());

        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
                                   /*isStore*/ true);
            if (!PropagateShadow || Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
                  Size, ArgAlign);
            } else {
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
              [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                  CpShadowPtr, CopyAlign, Base, CopyAlign, Size);
              LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");

              if (MS.TrackOrigins) {
                Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
                unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
                EntryIRB.CreateMemCpy(CpOriginPtr, kMinOriginAlignment,
                                      OriginPtr, kMinOriginAlignment,
                                      OriginSize);
              }
            }
          }

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
          } else {
            // Shadow over TLS.
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
                                                   kShadowTLSAlignment);
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
            }
          }
          LLVM_DEBUG(dbgs()
                     << "  ARG: " << FArg << " ==> " << *ShadowPtr << "\n");
          break;
        }

        ArgOffset += alignTo(Size, kShadowTLSAlignment);
      }
      assert(ShadowPtr && "Could not find shadow for an argument");
      return ShadowPtr;
    }
    // TODO: Partially undefined constant FP vectors?
    if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V) &&
        cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
        PoisonUndefVectors) {
      unsigned NumElems =
          cast<FixedVectorType>(V->getType())->getNumElements();
      SmallVector<Constant *, 32> ShadowVector(NumElems);
      for (unsigned i = 0; i != NumElems; ++i) {
        Constant *Elem = cast<Constant>(V)->getAggregateElement(i);
        ShadowVector[i] = isa<UndefValue>(Elem) ? getPoisonedShadow(Elem)
                                                : getCleanShadow(Elem);
      }

      Value *ShadowConstant = ConstantVector::get(ShadowVector);
      LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
                        << *ShadowConstant << "\n");

      return ShadowConstant;
    }

    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins)
      return nullptr;
    if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
      return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    }
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the shadow value is not 0.
  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks)
      return;

    if (!DebugCounter::shouldExecute(InsertCheckCounter)) {
      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");
      return;
    }

    Type *ShadowTy = Shadow->getType();
    if (isScalableNonVectorType(ShadowTy)) {
      LLVM_DEBUG(dbgs() << "Skipping check of scalable non-vector " << *Shadow
                        << " before " << *OrigIns << "\n");
      return;
    }

    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
            isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
           "Can only insert checks for integer, vector, and aggregate shadow "
           "types");

    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// Remember the place where a shadow check for Val should be inserted.
  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow)
        return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow)
        return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertCheckShadow(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    return ConstantDataVector::get(IRB.getContext(), OrderingTable);
  }
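  // Intuition check (informal): for an atomic *store*, MSan writes the
  // shadow before the application store, so the application ordering must be
  // at least Release for the shadow write to become visible together with
  // the data; e.g. a relaxed __atomic_store_n(p, v, __ATOMIC_RELAXED) is
  // upgraded to __ATOMIC_RELEASE via the table above.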
  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    return ConstantDataVector::get(IRB.getContext(), OrderingTable);
  }
  // ------------------- Visitors.
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))
      return;
    // Don't want to visit if we're in the prologue.
    if (isInPrologue(I))
      return;
    if (!DebugCounter::shouldExecute(InstrumentInstructionCounter)) {
      LLVM_DEBUG(dbgs() << "Skipping instruction: " << I << "\n");
      // We still need to set the shadow and origin to clean values.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Instructions.push_back(&I);
  }
  /// Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheckShadowOf(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
        setOrigin(&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr,
                                            OriginAlignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
    if (ClCheckAccessAddress)
      insertCheckShadowOf(I.getPointerOperand(), &I);
  }
  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
                                          /*isStore*/ true)
                           .first;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertCheckShadowOf(Val, &I);

    IRB.CreateStore(getCleanShadow(Val), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }
  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
                                          "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1, I.getShuffleMask(),
                                          "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    // Special case: if this is the bitcast (there is exactly 1 allowed)
    // between a musttail call and a ret, don't instrument. New instructions
    // are not allowed after a musttail call.
    if (CallInst *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }
  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
  /// Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, say, a bit in the left argument is defined
  /// and 0, then neither the value nor the definedness of the corresponding
  /// bit in the right argument affect the resulting shadow.
  ///
  ///  "And" of 0 and a poisoned value results in unpoisoned value:
  ///  1&1 => 1;     0&1 => 0;     p&1 => p;
  ///  1&0 => 0;     0&0 => 0;     p&0 => 0;
  ///  1&p => p;     0&p => 0;     p&p => p;
  ///
  ///  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
  Value *handleBitwiseAnd(IRBuilder<> &IRB, Value *V1, Value *V2, Value *S1,
                          Value *S2) {
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);

    return IRB.CreateOr({S1S2, V1S2, S1V2});
  }
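  // Worked example (8-bit): V1 = 0b0000'0000 fully defined (S1 = 0), V2
  // arbitrary with S2 = 0b1111'1111 (fully poisoned). Then S1S2 = 0,
  // V1S2 = V1 & S2 = 0, S1V2 = 0, so the result shadow is 0: ANDing with a
  // defined zero yields a defined zero regardless of the other operand.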
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);

    Value *OutShadow = handleBitwiseAnd(IRB, V1, V2, S1, S2);
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value:
    //  1|1 => 1;     0|1 => 1;     p|1 => 1;
    //  1|0 => 1;     0|0 => 0;     p|0 => p;
    //  1|p => 1;     0|p => p;     p|p => p;
    //
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(IRB.CreateNot(V1), S2);
    Value *S1V2 = IRB.CreateAnd(S1, IRB.CreateNot(V2));

    Value *S = IRB.CreateOr({S1S2, V1S2, S1V2});

    // If the "or" is "disjoint", the operands may never both be 1 in the same
    // position. A bit that is (possibly) 1 in both operands therefore makes
    // the corresponding result bit poisoned.
    if (ClPreciseDisjointOr && cast<PossiblyDisjointInst>(&I)->isDisjoint()) {
      Value *DisjointOrShadow =
          IRB.CreateAnd(IRB.CreateOr(V1, S1), IRB.CreateOr(V2, S2));
      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");
    }

    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
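  // Worked example for the plain-OR rule above (1 bit): V1 = 1 defined
  // (S1 = 0), V2 poisoned (S2 = 1). Then S1S2 = 0, ~V1 & S2 = 0, and
  // S1 & ~V2 = 0, so the result is a *defined* 1: ORing with a defined 1
  // masks the poisoned operand, exactly as the truth table 1|p => 1 states.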
  /// Combine shadow and/or origin of several operands.
  ///
  /// This is a general case of origin propagation. For an Nary operation,
  /// origin is set to the origin of an argument that is not entirely
  /// initialized. If there is more than one such argument, the rightmost of
  /// them is picked. It does not matter which one is picked if all arguments
  /// are initialized.
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

    /// Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow) {
          Shadow = OpShadow;
        } else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *Cond = MSV->convertToBool(OpShadow, IRB);
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// Set the current combined values as the given instruction's shadow
    /// and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }

    /// Store the current combined value at the specified origin location.
    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->paintOrigin(IRB, Origin, OriginPtr, TS, kMinOriginAlignment);
      }
    }
  };

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;

  /// Propagate origin for an arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
      return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())
      OC.Add(Op.get());
    OC.Done(&I);
  }
  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
                                  Ty->getScalarSizeInBits()
                            : Ty->getPrimitiveSizeInBits();
  }

  /// Cast between two shadow types, extending or truncating as necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (srcTy == dstTy)
      return V;
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)
      return IRB.CreateICmpNE(V, getCleanShadow(V));

    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        cast<VectorType>(dstTy)->getElementCount() ==
            cast<VectorType>(srcTy)->getElementCount())
      return IRB.CreateIntCast(V, dstTy, Signed);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// Propagate shadow for an arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
      SC.Add(Op.get());
    SC.Done(&I);
  }
  /// Build a horizontal OR-reduction: for every group of ReductionFactor
  /// adjacent shadow elements (optionally split into Shards processed
  /// independently), OR the group into one output element.
  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
                          unsigned Shards, Value *VectorA, Value *VectorB) {
    assert(isa<FixedVectorType>(VectorA->getType()));
    unsigned NumElems =
        cast<FixedVectorType>(VectorA->getType())->getNumElements();

    [[maybe_unused]] unsigned TotalNumElems = NumElems;
    if (VectorB) {
      assert(VectorA->getType() == VectorB->getType());
      TotalNumElems = NumElems * 2;
    }

    assert(NumElems % (ReductionFactor * Shards) == 0);

    IRBuilder<> IRB(&I);
    // Use shuffles to gather the i-th element of every group, then OR the
    // ReductionFactor gathered vectors together.
    Value *Or = nullptr;
    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;

      for (unsigned j = 0; j < Shards; j++) {
        unsigned Offset = NumElems / Shards * j;

        for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor)
          Mask.push_back(X + Offset + i);

        if (VectorB)
          for (unsigned X = 0; X < NumElems / Shards; X += ReductionFactor)
            Mask.push_back(NumElems + X + Offset + i);
      }

      Value *Gathered = VectorB
                            ? IRB.CreateShuffleVector(VectorA, VectorB, Mask)
                            : IRB.CreateShuffleVector(VectorA, Mask);
      Or = Or ? IRB.CreateOr(Or, Gathered) : Gathered;
    }
    return Or;
  }
  /// Handle horizontal pairwise intrinsics by OR-ing the shadows of adjacent
  /// elements.
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());
    assert(ParamType->getNumElements() * I.arg_size() ==
           2 * ReturnType->getNumElements());

    IRBuilder<> IRB(&I);

    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, /*ReductionFactor=*/2, Shards,
                                       FirstArgShadow, SecondArgShadow);

    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }

  /// Variant that first reinterprets the operand shadows using a different
  /// element width before reducing pairwise.
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I, unsigned Shards,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());
    assert(ParamType->getNumElements() * I.arg_size() ==
           2 * ReturnType->getNumElements());

    IRBuilder<> IRB(&I);

    FixedVectorType *ReinterpretShadowTy = nullptr;
    ReinterpretShadowTy = FixedVectorType::get(
        IRB.getIntNTy(ReinterpretElemWidth),
        ParamType->getPrimitiveSizeInBits() / ReinterpretElemWidth);

    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);

    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
    }

    Value *OrShadow = horizontalReduce(I, /*ReductionFactor=*/2, Shards,
                                       FirstArgShadow, SecondArgShadow);

    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }
  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
  // Handle multiplication by constant.
  //
  // A constant with trailing zeros makes the corresponding low bits of the
  // result zero and therefore defined. We model this by multiplying the
  // other operand's shadow by 2**(number of trailing zeros); multiplication
  // by a power of two (instead of a shift) also covers constant elements
  // equal to 0 inside vector operands.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
      Type *EltTy = VTy->getElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
                dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
        } else {
          Elements.push_back(ConstantInt::get(EltTy, 1));
        }
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
      } else {
        ShadowMul = ConstantInt::get(Ty, 1);
      }
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }

  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }
  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void handleIntegerDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }

  // Floating point division is side-effect free. We can not require that the
  // divisor is fully initialized and must propagate shadow. See PR37523.
  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
  /// Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of
  /// the arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // The result is defined if one of the following is true:
    // * there is a defined 1 bit in C, or
    // * C is fully defined.
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *LHS = IRB.CreateICmpNE(Sc, Zero);
    Value *RHS =
        IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero);
    Value *Si = IRB.CreateAnd(LHS, RHS);
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
  /// Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();

    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
      if (IsSigned) {
        // Sign-flip to map the signed range onto the unsigned range; the
        // relation between A and B is preserved when checked with
        // getUnsignedPredicate(), and the interval endpoints are unaffected.
        APInt MinVal =
            APInt::getSignedMinValue(V->getType()->getScalarSizeInBits());
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      }
      // Minimum: all undefined bits cleared. Maximum: all undefined bits set.
      Value *Min = IRB.CreateAnd(V, IRB.CreateNot(S));
      Value *Max = IRB.CreateOr(V, S);
      return std::make_pair(Min, Max);
    };

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
    Value *S1 = IRB.CreateICmp(I.getUnsignedPredicate(), Amin, Bmax);
    Value *S2 = IRB.CreateICmp(I.getUnsignedPredicate(), Amax, Bmin);

    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
  /// Instrument signed relational comparisons.
  ///
  /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
  /// bit of the shadow. Everything else is delegated to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp;
    Value *op = nullptr;
    CmpInst::Predicate pre;
    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
      op = I.getOperand(0);
      pre = I.getPredicate();
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
    } else {
      handleShadowOr(I);
      return;
    }

    if ((constOp->isNullValue() &&
         (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
        (constOp->isAllOnesValue() &&
         (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
      IRBuilder<> IRB(&I);
      Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
                                        "_msprop_icmp_s");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
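  // Example of the rule above: `%r = shl i8 %x, 3` with a fully defined
  // shift amount shifts the shadow of %x left by 3 as well; the low three
  // bits of %r are constant zeros and hence defined. A poisoned shift
  // amount poisons all of %r through the sign-extended S2Conv mask.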
  void handleFunnelShift(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S0 and S1.
    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S2 = getShadow(&I, 2);
    Value *S2Conv =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
    Value *V2 = I.getOperand(2);
    Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), S2Conv->getType(),
                                       {S0, S1, V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
  /// Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined, our interceptor will not
  /// kick in and we will lose the memmove. If we instrument the call here,
  /// but it does not get inlined, we will memmove the shadow twice, which is
  /// bad in case of overlapping regions. So we simply lower the intrinsic to
  /// a call. The same situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1)); // Ensure shadow initialized.
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemmoveFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  /// Instrument memcpy
  ///
  /// Similar to memmove: avoid copying shadow twice. This is somewhat
  /// unfortunate as it may slow down small constant memcpys.
  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1)); // Ensure shadow initialized.
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemcpyFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(
        MS.MemsetFn,
        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }
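  // Runtime-side sketch (assumed semantics; the real implementations live
  // in compiler-rt): __msan_memset(p, c, n) writes the n bytes *and* marks
  // shadow(p, n) as initialized, so lowering to one call keeps data and
  // shadow in sync whether or not the original intrinsic would have been
  // inlined.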
  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
  /// Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
    IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
    return true;
  }

  /// Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      const Align Alignment = Align(1);
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }
  /// Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type, plus a specified number of trailing flags of
  /// any type.
  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy()))
      return false;

    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

  /// Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRef behaviour and apply special instrumentation when we are
  /// reasonably sure that it does not violate the semantics.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)
      return false;

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload?
    // Some cases are now handled in handleAVXMasked{Load,Store}.
    return false;
  }

  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
      if (ClDumpHeuristicInstructions)
        dumpInst(I);

      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
                        << "\n");
      return true;
    }
    return false;
  }
  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleLifetimeStart(IntrinsicInst &I) {
    if (!PoisonStack)
      return;
    AllocaInst *AI = dyn_cast<AllocaInst>(I.getArgOperand(1));
    if (!AI)
      InstrumentLifetimeStart = false;
    LifetimeStartList.push_back(std::make_pair(&I, AI));
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    setShadow(&I, IRB.CreateIntrinsic(Intrinsic::bswap, ArrayRef(&OpType, 1),
                                      getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }
  /// Handle llvm.ctlz / llvm.cttz.
  ///
  /// The count is defined when a *defined* 1-bit terminates it before the
  /// first poisoned bit is reached; otherwise poisoned bits could change
  /// the result.
  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);

    Value *False = IRB.getInt1(false);
    Value *ConcreteZerosCount =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), {Src, False});
    Value *ShadowZerosCount =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), {SrcShadow, False});

    // Poisoned if the count over the concrete value reaches at least as far
    // as the first set shadow bit...
    Value *CompareConcreteZeros = IRB.CreateICmpUGE(
        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    // ... unless the shadow is all-zero (fully initialized input).
    Value *NotAllZeroShadow =
        IRB.CreateICmpNE(SrcShadow, getCleanShadow(Src), "_mscz_shadow");
    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");

    // Corner case: an all-zero concrete value with any poisoned bit yields a
    // poisoned count.
    Value *BoolZeroPoison = IRB.CreateIsNull(Src, "_mscz_bzp");
    BoolZeroPoison = IRB.CreateAnd(BoolZeroPoison, NotAllZeroShadow);
    OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");

    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  }
  /// Handle NEON vector convert intrinsics.
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I, bool FixedPoint) {
    assert(I.arg_size() == (FixedPoint ? 2u : 1u));
    IRBuilder<> IRB(&I);
    Value *S0 = getShadow(&I, 0);

    if (FixedPoint) {
      // The number of fractional bits is an immediate and must be defined.
      Value *Precision = I.getOperand(1);
      insertCheckShadowOf(Precision, &I);
    }

    // Each output element is fully defined iff the corresponding input
    // element is fully defined.
    Value *OutShadow = IRB.CreateSExt(
        IRB.CreateICmpNE(S0, getCleanShadow(S0)), getShadowTy(&I));
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  /// Some instructions have additional zero-elements in the return type
  /// (e.g. the return vector is wider than the operand vector). Return a
  /// shadow type that matches just the "interesting" prefix.
  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {
    FixedVectorType *ShadowType = cast<FixedVectorType>(getShadowTy(&I));
    if (ShadowType->getElementCount() ==
        cast<FixedVectorType>(Src->getType())->getElementCount() * 2)
      ShadowType = FixedVectorType::getHalfElementsVectorType(ShadowType);
    return ShadowType;
  }

  /// Doubles the length of a vector shadow (filled with zeros) if necessary
  /// to match the length of the shadow for the instruction.
  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =
        cast<FixedVectorType>(Shadow->getType())->getNumElements();
    unsigned FullShadowNumElems =
        cast<FixedVectorType>(FullShadow->getType())->getNumElements();

    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
    } else {
      // Append zeros to the shadow via an identity shuffle into the clean
      // (all-zero) vector.
      SmallVector<int, 32> ShadowMask(FullShadowNumElems);
      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
      FullShadow =
          IRB.CreateShuffleVector(Shadow, getCleanShadow(Shadow), ShadowMask);
    }
    return FullShadow;
  }

  /// Handle x86 SSE vector conversion.
  ///
  /// e.g., single-precision to half-precision conversion:
  ///   <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {
      // The rounding mode is the last operand and must be a defined
      // immediate.
      Value *RoundingMode = I.getArgOperand(I.arg_size() - 1);
      insertCheckShadowOf(RoundingMode, &I);
    }

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());

    // The return type might have more elements than the input.
    // Temporarily shrink the return type's number of elements.
    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);

    IRBuilder<> IRB(&I);
    Value *S0 = getShadow(&I, 0);

    // Each converted element is fully uninitialized if any bit of the
    // corresponding input element is uninitialized, and fully initialized
    // otherwise.
    Value *Shadow =
        IRB.CreateSExt(IRB.CreateICmpNE(S0, getCleanShadow(S0)), ShadowType);

    // Extend the shadow back to the full return width if necessary.
    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);

    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);
  }
  /// Instrument x86 SSE vector conversion intrinsics.
  ///
  /// These convert NumUsedElements elements of ConvertOp and copy the rest of
  /// the output elements from CopyOp (or fill them with zeroes if CopyOp is
  /// absent). The converted elements require a check; the copied elements
  /// propagate shadow.
  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
            isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // Combine shadow for elements of ConvertOp that are used in this
    // operation, and insert a check.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come
    // from ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }
  // Given a scalar or vector, extract lower 64 bits (or less), and return all
  // zeroes if it is zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
    S = IRB.CreateICmpNE(S, getCleanShadow(S));
    Value *S2 = IRB.CreateSExt(S, IRB.getInt64Ty());
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  // Given a vector, extract its first element, and return all
  // zeroes if it is zero, and all ones otherwise.
  Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
    Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
    S2 = IRB.CreateSExt(S2, S1->getType());
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  // Instrument vector shift intrinsics.
  //
  // This function instruments intrinsics like int_x86_avx2_psll_w.
  // The intrinsic shifts %In by %ShiftSize bits. %ShiftSize may be a vector;
  // in that case the lower 64 bits determine the shift amount, and all other
  // bits are ignored.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    assert(I.arg_size() == 2);
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), {V1->getType()},
                                       {IRB.CreateBitCast(S1, V1->getType()), V2});
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
  // Get an MMX-sized (64-bit by default) vector type, or optionally, other
  // sized vectors.
  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
    return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                                X86_MMXSizeInBits / EltSizeInBits);
  }

  // Returns a signed counterpart for an (un)signed-saturate-and-pack
  // intrinsic.
  Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
    switch (id) {
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;

    default:
      llvm_unreachable("unexpected intrinsic id");
    }
  }
  // Instrument vector pack intrinsics.
  //
  // This function instruments intrinsics like x86_mmx_packsswb, that
  // packs elements of 2 input vectors into half as many bits with saturation.
  // Shadow is propagated with the signed variant of the same intrinsic
  // applied to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
  // MMXEltSizeInBits is used only for x86mmx arguments.
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {
    assert(I.arg_size() == 2);
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());

    // SExt and ICmpNE below must apply to individual elements of input
    // vectors. In case of x86mmx arguments, cast them to appropriate vector
    // types and back.
    Type *T =
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
      S1 = IRB.CreateBitCast(S1, T);
      S2 = IRB.CreateBitCast(S2, T);
    }
    Value *S1_ext =
        IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
    Value *S2_ext =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
    if (MMXEltSizeInBits) {
      S1_ext = IRB.CreateBitCast(S1_ext, getMMXVectorTy(64));
      S2_ext = IRB.CreateBitCast(S2_ext, getMMXVectorTy(64));
    }

    Value *S = IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()),
                                   {}, {S1_ext, S2_ext}, nullptr,
                                   "_msprop_vector_pack");
    if (MMXEltSizeInBits)
      S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
  // Convert `Mask` into `<n x i1>`.
  Constant *createDppMask(unsigned Width, unsigned Mask) {
    SmallVector<Constant *, 4> R(Width);
    for (auto &M : R) {
      M = ConstantInt::getBool(F.getContext(), Mask & 1);
      Mask >>= 1;
    }
    return ConstantVector::get(R);
  }

  // Calculate the output shadow as an array of booleans `<n x i1>`, assuming
  // that if any source element selected by SrcMask is poisoned, the entire
  // dot product (every destination element selected by DstMask) is poisoned.
  Value *findDppPoisonedOutput(IRBuilder<> &IRB, Value *S, unsigned SrcMask,
                               unsigned DstMask) {
    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();

    S = IRB.CreateSelect(createDppMask(Width, SrcMask), S,
                         Constant::getNullValue(S->getType()));
    Value *SElem = IRB.CreateOrReduce(S);
    Value *IsClean = IRB.CreateIsNull(SElem, "_msdpp");
    Value *DstMaskV = createDppMask(Width, DstMask);

    return IRB.CreateSelect(
        IsClean, Constant::getNullValue(DstMaskV->getType()), DstMaskV);
  }

  // Instrument x86 SSE4.1 dot product (dpps, dppd).
  void handleDppIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(S0, S1);

    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();
    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned Mask = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    // Calculate shadow as `<n x i1>`.
    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
    if (Width == 8) {
      // The first 4 elements of shadow are already calculated; the masks
      // operate on 4-element halves, so compute the second half by shifting
      // both masks.
      SI1 = IRB.CreateOr(
          SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    }
    // Extend to the real size of shadow, poisoning either all or none of the
    // bits of an element.
    S = IRB.CreateSExt(SI1, S->getType(), "_msdpp");

    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  Value *convertBlendvToSelectMask(IRBuilder<> &IRB, Value *C) {
    C = CreateAppToShadowCast(IRB, C);
    FixedVectorType *FVT = cast<FixedVectorType>(C->getType());
    unsigned ElSize = FVT->getElementType()->getPrimitiveSizeInBits();
    C = IRB.CreateAShr(C, ElSize - 1);
    FVT = FixedVectorType::get(IRB.getInt1Ty(), FVT->getNumElements());
    return IRB.CreateTrunc(C, FVT);
  }

  // `blendv(f, t, c)` is effectively `select(c[top_bit], t, f)`.
  void handleBlendvIntrinsic(IntrinsicInst &I) {
    Value *C = I.getOperand(2);
    Value *T = I.getOperand(1);
    Value *F = I.getOperand(0);

    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;

    {
      IRBuilder<> IRB(&I);
      // Extract the top bit from the condition and its shadow.
      C = convertBlendvToSelectMask(IRB, C);
      Sc = convertBlendvToSelectMask(IRB, Sc);

      setShadow(C, Sc);
      setOrigin(C, Oc);
    }

    handleSelectLikeInst(I, C, T, F);
  }
  // Instrument sum-of-absolute-differences intrinsics.
  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    Type *ResTy = IsMMX ? IntegerType::get(*MS.C, 64) : I.getType();
    unsigned ZeroBitsPerResultElement =
        ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;

    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    // The high bits of each result element are always zero and therefore
    // defined.
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
  /// Instrument dot-product intrinsics.
  ///
  /// These multiply pairs of adjacent elements and horizontally add each
  /// group of ReductionFactor products; some variants also take an
  /// accumulator as operand 0.
  void handleVectorDotProductIntrinsic(IntrinsicInst &I,
                                       unsigned ReductionFactor,
                                       unsigned EltSizeInBits, unsigned Lanes) {
    IRBuilder<> IRB(&I);
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());

    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      // (Va, Vb)
      Va = I.getOperand(0);
      Vb = I.getOperand(1);

      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {
      // (Accumulator, Va, Vb)
      Va = I.getOperand(1);
      Vb = I.getOperand(2);

      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);
    }

    // For even/odd-lane variants, keep only the participating lanes.
    if (Lanes != kAllLanes) {
      unsigned Width = cast<FixedVectorType>(Sa->getType())->getNumElements();
      Sa = IRB.CreateShuffleVector(Sa,
                                   getPclmulMask(Width, Lanes == kOddLanes));
      Sb = IRB.CreateShuffleVector(Sb,
                                   getPclmulMask(Width, Lanes == kOddLanes));
    }

    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =
          cast<FixedVectorType>(I.getOperand(0)->getType());
      assert(AccumulatorType == ReturnType);
    }

    FixedVectorType *ImplicitReturnType = ReturnType;
    if (EltSizeInBits) {
      // Reinterpret MMX-ish operands using the supplied element width.
      ImplicitReturnType = cast<FixedVectorType>(
          getMMXVectorTy(EltSizeInBits * ReductionFactor,
                         ReturnType->getPrimitiveSizeInBits()));
    } else {
      assert(cast<FixedVectorType>(Va->getType())->getNumElements() ==
             ReturnType->getNumElements() * ReductionFactor);
    }

    // Multiplying a defined zero by an uninitialized element still yields a
    // defined zero, so model each product with the bitwise-AND rule before
    // reducing horizontally.
    Value *VaInt = CreateAppToShadowCast(IRB, Va);
    Value *VbInt = CreateAppToShadowCast(IRB, Vb);
    Value *VaNonZero = IRB.CreateICmpNE(VaInt, getCleanShadow(VaInt));
    Value *VbNonZero = IRB.CreateICmpNE(VbInt, getCleanShadow(VbInt));
    Value *SaNonZero = IRB.CreateICmpNE(Sa, getCleanShadow(Sa));
    Value *SbNonZero = IRB.CreateICmpNE(Sb, getCleanShadow(Sb));
    Value *And =
        handleBitwiseAnd(IRB, VaNonZero, VbNonZero, SaNonZero, SbNonZero);

    // Each output element is poisoned if any product in its group is.
    Value *OutShadow = horizontalReduce(I, ReductionFactor, /*Shards=*/1,
                                        IRB.CreateSExt(And, Sa->getType()),
                                        nullptr);
    OutShadow = CreateShadowCast(IRB, OutShadow, ImplicitReturnType);
    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    // The accumulator's shadow, if present, ORs into the result.
    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  // Instrument compare-packed intrinsics.
  // Basically, an OR followed by sext(icmp ne 0) to end up with all-zeros or
  // all-ones shadow per element.
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I,
                                          bool PredicateAsOperand) {
    if (PredicateAsOperand) {
      // The predicate must be a defined immediate.
      assert(I.paramHasAttr(2, Attribute::ImmArg));
      insertCheckShadowOf(I.getArgOperand(2), &I);
    }

    IRBuilder<> IRB(&I);
    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = IRB.CreateSExt(
        IRB.CreateICmpNE(IRB.CreateOr(Shadow0, Shadow1),
                         Constant::getNullValue(ResTy)),
        ResTy);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  // Instrument compare-scalar intrinsics.
  // This handles both cmp* intrinsics which return the result in the first
  // element of a vector, and comi* which return the result as i32.
  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
  /// Handle horizontal reductions: OR the shadow of every operand element
  /// into the (scalar) result shadow.
  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
    assert(I.arg_size() == 1);

    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));
    else
      assert(S->getType() == getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle reductions that take an additional starter value (operand 0),
  /// whose shadow ORs directly into the result.
  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
    assert(I.arg_size() == 2);

    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = IRB.CreateOrReduce(getShadow(&I, 1));
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    assert(S->getType() == getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  // Instrument vector.reduce.or intrinsic.
  // Valid (non-poisoned) set bits in the operand pull low the
  // corresponding shadow bits.
  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    // Bit N is clean if any field's bit N is 1 and unpoisoned.
    Value *OutShadowMask = IRB.CreateAndReduce(OperandUnsetOrPoison);
    // Otherwise, it is clean if every field's bit N is unpoisoned.
    Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
    Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);

    setShadow(&I, S);
    setOrigin(&I, getOrigin(&I, 0));
  }

  // Instrument vector.reduce.and intrinsic.
  // Valid (non-poisoned) unset bits in the operand pull down the
  // corresponding shadow bits.
  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    // Bit N is clean if any field's bit N is 0 and unpoisoned.
    Value *OutShadowMask = IRB.CreateAndReduce(OperandSetOrPoison);
    // Otherwise, it is clean if every field's bit N is unpoisoned.
    Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
    Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);

    setShadow(&I, S);
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleStmxcsr(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    Value *ShadowPtr =
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;

    IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);
  }

  void handleLdmxcsr(IntrinsicInst &I) {
    if (!InsertChecks)
      return;

    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    const Align Alignment = Align(1);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
                                    : getCleanOrigin();
    insertCheckShadow(Shadow, Origin, &I);
  }
  void handleMaskedExpandLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(0);
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, /*isStore*/ false);

    Value *Shadow =
        IRB.CreateMaskedExpandLoad(ShadowTy, ShadowPtr, Align, Mask,
                                   getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);

    // TODO: Store origins.
    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedCompressStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    MaybeAlign Align = I.getParamAlign(1);
    Value *Mask = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, /*isStore*/ true);

    IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Align, Mask);

    // TODO: Store origins.
  }
4323 void handleMaskedGather(IntrinsicInst &
I) {
4325 Value *Ptrs =
I.getArgOperand(0);
4326 const Align Alignment =
I.getParamAlign(0).valueOrOne();
4328 Value *PassThru =
I.getArgOperand(2);
4330 Type *PtrsShadowTy = getShadowTy(Ptrs);
4332 insertCheckShadowOf(Mask, &
I);
4336 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4339 if (!PropagateShadow) {
4340 setShadow(&
I, getCleanShadow(&
I));
4341 setOrigin(&
I, getCleanOrigin());
4345 Type *ShadowTy = getShadowTy(&
I);
4347 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4348 Ptrs, IRB, ElementShadowTy, Alignment,
false);
4352 getShadow(PassThru),
"_msmaskedgather");
4354 setShadow(&
I, Shadow);
4357 setOrigin(&
I, getCleanOrigin());
  void handleMaskedScatter(IntrinsicInst &I) {
    // ...
    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    // ...
    Type *PtrsShadowTy = getShadowTy(Ptrs);
    // ...
    insertCheckShadowOf(Mask, &I);
    // ...
    insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
    // ...
    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy = ...;
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, /*isStore=*/true);
    // ...
  }
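
  // llvm.masked.store: the value shadow is stored to the shadow of *Ptr with
  // the same mask, so only the lanes that are actually written update shadow
  // memory; origins are painted over the stored range when tracked.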
  void handleMaskedStore(IntrinsicInst &I) {
    // ...
    Value *V = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    const Align Alignment = I.getParamAlign(1).valueOrOne();
    // ...
    Value *Shadow = getShadow(V);
    // ...
    insertCheckShadowOf(Ptr, &I);
    insertCheckShadowOf(Mask, &I);
    // ...
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, /*isStore=*/true);
    // ...
    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr, ...);
  }
  void handleMaskedLoad(IntrinsicInst &I) {
    // ...
    Value *Ptr = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    // ...
    Value *PassThru = I.getArgOperand(2);
    // ...
    insertCheckShadowOf(Ptr, &I);
    insertCheckShadowOf(Mask, &I);
    // ...
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore=*/false);
    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                       getShadow(PassThru), "_msmaskedld"));
    // ...
    if (!MS.TrackOrigins)
      return;
    // ...
    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
    // ...
    setOrigin(&I, Origin);
  }
  void handleAVXMaskedStore(IntrinsicInst &I) {
    // ...
    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");
    // ...
    Value *Src = I.getArgOperand(2);
    // ...
    Value *SrcShadow = getShadow(Src);
    // ...
    insertCheckShadowOf(Dst, &I);
    insertCheckShadowOf(Mask, &I);
    // ...
    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore=*/true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    // ...
    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()), ...);
  }
  void handleAVXMaskedLoad(IntrinsicInst &I) {
    // ...
    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");
    // ...
    insertCheckShadowOf(Mask, &I);
    // ...
    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore=*/false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);
    // ...
    if (!MS.TrackOrigins)
      return;
    // ...
    setOrigin(&I, PtrSrcOrigin);
  }
  void maskedCheckAVXIndexShadow(IRBuilder<> &IRB, Value *Idx, Instruction *I) {
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize = ...;
    // ...
    auto *IdxShadow = getShadow(Idx);
    // ...
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }
  void handleAVXVpermilvar(IntrinsicInst &I) {
    // ...
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);
    // ...
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    // ...
    Value *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                    {Shadow, I.getArgOperand(1)});
    // ...
    setOriginForNaryOp(I);
  }
  void handleAVXVpermi2var(IntrinsicInst &I) {
    // ...
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);
    // ...
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    // ...
    Value *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                    {AShadow, Idx, BShadow});
    // ...
    setOriginForNaryOp(I);
  }
  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    // ...
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    // ...
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }
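
  // AVX512 masked FP-to-integer conversions carry a write-through vector and
  // an integer bitmask: each output lane either receives the converted value
  // or keeps the write-through value. Shadow propagation mirrors that select,
  // choosing between the conversion-input shadow and the write-through shadow
  // lane by lane.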
  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    // ...
    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }
    // ...
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);
    assert(WriteThruNumElements == MaskNumElements);
    // ...
    insertCheckShadowOf(Mask, &I);
    // ...
    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // ... (widen the mask; "_ms_mask_bitcast")
    }
    // ... (cast AShadow to getShadowTy(&I); "_ms_a_shadow")
    Value *WriteThroughShadow = getShadow(WriteThrough);
    // ... (Shadow = mask-select of AShadow vs. WriteThroughShadow;
    //      "_ms_writethru_select")
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleBmiIntrinsic(IntrinsicInst &I) {
    // ...
    Type *ShadowTy = getShadowTy(&I);
    // ...
    Value *SMask = getShadow(&I, 1);
    // ...
    Value *Shadow = IRB.CreateCall(I.getCalledFunction(),
                                   {getShadow(&I, 0), I.getOperand(1)});
    // ...
    setOriginForNaryOp(I);
  }
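
  // getPclmulMask builds a shuffle mask picking either the even or the odd
  // 64-bit lanes, matching which half of each 128-bit pair pclmulqdq actually
  // reads (selected by the immediate's 0x01/0x10 bits). Assuming the elided
  // loop body duplicates each selected lane over the pair, Width == 4 with
  // even elements would yield {0, 0, 2, 2}.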
  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      // ...
    }
    return Mask;
  }

  void handlePclmulIntrinsic(IntrinsicInst &I) {
    // ...
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    // ...
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    // ...
  }
  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    // ...
    Value *Second = getShadow(&I, 1);
    // ...
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    // ...
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleVtestIntrinsic(IntrinsicInst &I) {
    // ...
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    // ...
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    // ...
    Value *Second = getShadow(&I, 1);
    // ...
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    // ...
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    // ...
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    // ...
  }
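
  // llvm.abs: the result shadow equals the source shadow, except that when
  // IsIntMinPoison is set an INT_MIN input produces a poison result, so the
  // shadow for the INT_MIN lanes is forced to fully poisoned in that case.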
  void handleAbsIntrinsic(IntrinsicInst &I) {
    // ...
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);
    // ...
    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());
    // ...
    Value *SrcShadow = getShadow(Src);
    // ...
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
    // ...
    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow =
        IRB.CreateSelect(IRB.CreateICmpEQ(Src, MinValVec), PoisonedShadow,
                         SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleIsFpClass(IntrinsicInst &I) {
    // ...
    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    // ...
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    // ...
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
    // ...
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // ... (elided helper operating on a value V:)
  //   Value *Shadow = getShadow(V);
  // ...
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    // ...
    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);
    // ...
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);
    // ...
    if (ANumElements != OutputNumElements) {
      // ...
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements));
    }

    Value *AShadow = getShadow(A);
    // ...
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);
    // ...
    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);
    // ... (Shadow = mask-select of AShadow vs. WriteThroughShadow)
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
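
  // Generic handler for AVX512 masked FP operations shaped like
  // op(..., A, ..., WriteThru, ..., Mask): AIndex/WriteThruIndex/MaskIndex
  // name the operand positions, and every other operand (rounding mode, imm8,
  // etc.) is required to be an integer whose shadow is checked eagerly.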
  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
                                         unsigned WriteThruIndex,
                                         unsigned MaskIndex) {
    // ...
    unsigned NumArgs = I.arg_size();
    assert(AIndex < NumArgs);
    assert(WriteThruIndex < NumArgs);
    assert(MaskIndex < NumArgs);
    assert(AIndex != WriteThruIndex);
    assert(AIndex != MaskIndex);
    assert(WriteThruIndex != MaskIndex);

    Value *A = I.getOperand(AIndex);
    Value *WriteThru = I.getOperand(WriteThruIndex);
    Value *Mask = I.getOperand(MaskIndex);
    // ...
    assert(isFixedFPVector(WriteThru));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThru->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    for (unsigned i = 0; i < NumArgs; ++i) {
      if (i != AIndex && i != WriteThruIndex) {
        // ...
        assert(I.getOperand(i)->getType()->isIntegerTy());
        insertCheckShadowOf(I.getOperand(i), &I);
      }
    }
    // ...
    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8) {
      // ... (narrow the mask to ANumElements bits)
    }
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    // ...
    Value *AShadow = getShadow(A);
    // ...
    Value *WriteThruShadow = getShadow(WriteThru);
    // ... (Shadow = mask-select of AShadow vs. WriteThruShadow)
    setShadow(&I, Shadow);
    // ...
    setOriginForNaryOp(I);
  }
  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    // ...
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);
    // ...
    insertCheckShadowOf(Mask, &I);
    // ...
    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    // ...
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);
    // ...
    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);
    // ... (MaskLower = lowest mask bit as an i1)
    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0), ...);
    // ...
    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }
  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    // ...
    Value *X = I.getOperand(0);
    Value *A = I.getOperand(1);
    Value *B = I.getOperand(2);
    // ...
    assert(cast<VectorType>(A->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);
    // ...
    assert(A->getType() == X->getType());
    // ...
    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);
    // ...
    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    Value *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    Value *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                          {X, AShadow, BZeroShadow});
    Value *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                          {XShadow, A, BZeroShadow});

    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    // ...
    for (unsigned i = 0; i < NumElements; i++) {
      // ... (insert BShadow into element i of BBroadcastShadow)
    }

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }
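
  // NEON ldN/ldNlane/ldNr load several vectors (a struct result) through a
  // single pointer. The shadow is read from the shadow memory of Src using
  // the corresponding shadow intrinsic; for the lane variants the lane number
  // is passed through verbatim but checked for initialization.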
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();
    // ...
    assert(I.getType()->isStructTy());
    // ...
    SmallVector<Value *, 8> ShadowArgs;
    if (WithLane) {
      assert(4 <= numArgs && numArgs <= 6);
      // ...
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      // ...
      insertCheckShadowOf(LaneNumber, &I);
      // ...
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore=*/false);
    // ...
    if (!MS.TrackOrigins)
      return;
    // ...
    setOrigin(&I, PtrSrcOrigin);
  }
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    // ...
    int numArgOperands = I.arg_size();
    // ...
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    // ...
    int skipTrailingOperands = 1;
    // ...
    insertCheckShadowOf(Addr, &I);
    // ...
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      // ... (the lane operand,
      //      I.getArgOperand(numArgOperands - skipTrailingOperands),
      //      must be an integer)
    }

    SmallVector<Value *, 8> ShadowArgs;
    // ...
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      // ...
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }
    // ... (OutputVectorTy spans all
    //      (numArgOperands - skipTrailingOperands) stored vectors)
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(1,
                        I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // ...
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore=*/true);
    ShadowArgs.append(1, OutputShadowPtr);
    // ...
    if (MS.TrackOrigins) {
      // ...
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }
  void handleNEONMatrixMultiply(IntrinsicInst &I, unsigned int ARows,
                                unsigned int ACols, unsigned int BRows,
                                unsigned int BCols) {
    // ...
    Value *R = I.getArgOperand(0);
    Value *A = I.getArgOperand(1);
    Value *B = I.getArgOperand(2);

    assert(I.getType() == R->getType());
    // ...
    Value *ShadowR = getShadow(&I, 0);
    Value *ShadowA = getShadow(&I, 1);
    Value *ShadowB = getShadow(&I, 2);
    // ...
    Value *ShadowAB = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {getCleanShadow(R), ShadowA, ShadowB});
    // ...
    setShadow(&I, IRB.CreateOr(ShadowAB, ShadowR));
    setOriginForNaryOp(I);
  }
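
  // "Apply the same intrinsic to the shadow" is the cheapest precise rule for
  // data-movement intrinsics (tbl/tbx, pshufb, vsli, ...): permuting the
  // bytes of the operands permutes their shadow bytes identically. The final
  // trailingVerbatimArgs operands (e.g. a lane or shift amount) are passed
  // through unchanged, and their shadows are OR-ed into the result instead.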
  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID shadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    // ...
    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    // ...
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);
      // ...
    }
    // ...
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs;
         i < I.arg_size(); i++) {
      Value *Arg = I.getArgOperand(i);
      // ...
    }
    // ...
    Value *CombinedShadow = CI;

    // ...
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs;
         i < I.arg_size(); i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }
    // ...
    setOriginForNaryOp(I);
  }

  // ...
  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    // ...
  }
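
  // Intrinsic dispatch is split into three tiers tried in order by
  // visitIntrinsicInst below: target-independent intrinsics first, then x86
  // SIMD, then ARM NEON. Each helper returns true iff it instrumented the
  // call, so unhandled intrinsics fall through to the heuristic paths.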
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(), ...);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      // ...
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    // ...
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    // ...
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    // ...
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, false);
      break;
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      // ...
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }
  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;
    // ...
    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;
    // ...
    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }
    // ...
    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, false);
      break;
    // ...
    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }
    // ...
    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, true);
      break;
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    // ...
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;
    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;
    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;
    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;
    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;
    // ...
    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    case Intrinsic::aarch64_neon_bfmlalt:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    case Intrinsic::aarch64_neon_bfmlalb:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    // ...
    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssds_512:
    case Intrinsic::x86_avx2_vpdpbsud_128:
    case Intrinsic::x86_avx2_vpdpbsud_256:
    case Intrinsic::x86_avx10_vpdpbsud_512:
    case Intrinsic::x86_avx2_vpdpbsuds_128:
    case Intrinsic::x86_avx2_vpdpbsuds_256:
    case Intrinsic::x86_avx10_vpdpbsuds_512:
    case Intrinsic::x86_avx2_vpdpbuud_128:
    case Intrinsic::x86_avx2_vpdpbuud_256:
    case Intrinsic::x86_avx10_vpdpbuud_512:
    case Intrinsic::x86_avx2_vpdpbuuds_128:
    case Intrinsic::x86_avx2_vpdpbuuds_256:
    case Intrinsic::x86_avx10_vpdpbuuds_512:
      handleVectorDotProductIntrinsic(I, 4, ...);
      break;
    // ...
    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
    case Intrinsic::x86_avx2_vpdpwsud_128:
    case Intrinsic::x86_avx2_vpdpwsud_256:
    case Intrinsic::x86_avx10_vpdpwsud_512:
    case Intrinsic::x86_avx2_vpdpwsuds_128:
    case Intrinsic::x86_avx2_vpdpwsuds_256:
    case Intrinsic::x86_avx10_vpdpwsuds_512:
    case Intrinsic::x86_avx2_vpdpwusd_128:
    case Intrinsic::x86_avx2_vpdpwusd_256:
    case Intrinsic::x86_avx10_vpdpwusd_512:
    case Intrinsic::x86_avx2_vpdpwusds_128:
    case Intrinsic::x86_avx2_vpdpwusds_256:
    case Intrinsic::x86_avx10_vpdpwusds_512:
    case Intrinsic::x86_avx2_vpdpwuud_128:
    case Intrinsic::x86_avx2_vpdpwuud_256:
    case Intrinsic::x86_avx10_vpdpwuud_512:
    case Intrinsic::x86_avx2_vpdpwuuds_128:
    case Intrinsic::x86_avx2_vpdpwuuds_256:
    case Intrinsic::x86_avx10_vpdpwuuds_512:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    // ...
    case Intrinsic::x86_avx512bf16_dpbf16ps_128:
    case Intrinsic::x86_avx512bf16_dpbf16ps_256:
    case Intrinsic::x86_avx512bf16_dpbf16ps_512:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;
    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I, true);
      break;
    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;
    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;
    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;
    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
      handlePairwiseShadowOrIntrinsic(I, 1, ...);
      break;
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_avx2_phsub_w:
      handlePairwiseShadowOrIntrinsic(I, 2, ...);
      break;
    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
      handlePairwiseShadowOrIntrinsic(I, 1, ...);
      break;
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_avx2_phsub_d:
      handlePairwiseShadowOrIntrinsic(I, 2, ...);
      break;
    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
      handlePairwiseShadowOrIntrinsic(I, 1, ...);
      break;
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_avx2_phsub_sw:
      handlePairwiseShadowOrIntrinsic(I, 2, ...);
      break;
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256:
      handlePairwiseShadowOrIntrinsic(I, 2);
      break;
    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }
    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }
    // ...
    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // ...
      [[maybe_unused]] bool Success = maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }
    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;
    // ...
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(), ...);
      break;
    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      // ...
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(), ...);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_dw_512, ...);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_db_512, ...);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_qb_512, ...);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(
          I, Intrinsic::x86_avx512_mask_pmov_qw_512, ...);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      // ...
      handleAVX512VectorDownConvert(I);
      break;
    }
    // ...
    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1, ...);
      break;
    // ...
    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 1, ...);
      break;
    // ...
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
    case Intrinsic::x86_avx512_mask_rndscale_ps_512:
    case Intrinsic::x86_avx512_mask_rndscale_ps_256:
    case Intrinsic::x86_avx512_mask_rndscale_ps_128:
    case Intrinsic::x86_avx512_mask_rndscale_pd_512:
    case Intrinsic::x86_avx512_mask_rndscale_pd_256:
    case Intrinsic::x86_avx512_mask_rndscale_pd_128:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
      handleAVX512VectorGenericMaskedFP(I, 0, 2, ...);
      break;
    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }
    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;
    default:
      return false;
    }
    return true;
  }
  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    // ...
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, false);
      break;
    // ...
    case Intrinsic::aarch64_neon_vsli:
    case Intrinsic::aarch64_neon_vsri:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(), ...);
      break;
    // ...
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    // ...
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    // ...
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    // ...
    case Intrinsic::aarch64_neon_addp:
    // ...
    case Intrinsic::aarch64_neon_faddp:
    // ...
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I, 1);
      break;
    }
    // ...
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    // ...
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    // ...
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    // ...
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    // ...
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    // ...
    case Intrinsic::aarch64_neon_fcvtxn:
    // ...
    case Intrinsic::aarch64_neon_vcvthf2fp:
    case Intrinsic::aarch64_neon_vcvtfp2hf:
      handleNEONVectorConvertIntrinsic(I, false);
      break;
    // ...
    case Intrinsic::aarch64_neon_vcvtfxs2fp:
    case Intrinsic::aarch64_neon_vcvtfp2fxs:
    case Intrinsic::aarch64_neon_vcvtfxu2fp:
    case Intrinsic::aarch64_neon_vcvtfp2fxu:
      handleNEONVectorConvertIntrinsic(I, true);
      break;
    // ...
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    // ...
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    // ...
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    // ...
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, true);
      break;
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, false);
      break;
    }
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, true);
      break;
    }
    // ...
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      // ...
      break;
    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, false);
      break;
    }
    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, true);
      break;
    }
    // ...
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      // ...
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(), ...);
      break;
    }
    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }
    case Intrinsic::aarch64_neon_smmla:
    case Intrinsic::aarch64_neon_ummla:
    case Intrinsic::aarch64_neon_usmmla:
      handleNEONMatrixMultiply(I, 2, 8, 8, ...);
      break;
    // ...
    case Intrinsic::aarch64_neon_sdot:
    case Intrinsic::aarch64_neon_udot:
    case Intrinsic::aarch64_neon_usdot:
      handleVectorDotProductIntrinsic(I, 4, ...);
      break;
    // ...
    case Intrinsic::aarch64_neon_bfdot:
      handleVectorDotProductIntrinsic(I, 2, ...);
      break;
    default:
      return false;
    }
    return true;
  }
  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }
  void visitLibAtomicLoad(CallBase &CB) {
    // ...
    Value *NewOrdering = ...;
    // ...
    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore=*/false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore=*/true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin =
          NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr, ...);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }

  void visitLibAtomicStore(CallBase &CB) {
    // ...
    Value *NewOrdering = ...;
    // ...
    Value *DstShadowPtr = ...;
    // ...
  }
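
  // Calls pass argument shadow out-of-band through __msan_param_tls: each
  // argument's shadow is stored at a fixed ArgOffset before the call and read
  // back from the same offset in the callee; the return value shadow travels
  // through __msan_retval_tls the same way. With -msan-eager-checks, noundef
  // arguments are instead checked at the call site and use no TLS slot.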
  void visitCallBase(CallBase &CB) {
    // ...
    if (CB.isInlineAsm()) {
      // ...
      visitAsmInstruction(CB);
      // ...
      visitInstruction(CB);
      return;
    }
    // ...
    switch (/* library function of the callee */) {
    case LibFunc_atomic_load:
      if (isa<InvokeInst>(CB)) {
        llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                        " Ignoring!\n";
        break;
      }
      visitLibAtomicLoad(CB);
      return;
    case LibFunc_atomic_store:
      visitLibAtomicStore(CB);
      return;
    default:
      break;
    }
    // ...
    AttributeMask B;
    B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
    // ...
    Func->removeFnAttrs(B);
    // ...
    bool MayCheckCall = MS.EagerChecks;
    // ...
    MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    // ...
    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // ...
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();
      // ...
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
        // ...
      }

      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
      LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                        << " Shadow: " << *ArgShadow << "\n");
      if (ByVal) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        // ...
        MaybeAlign Alignment = std::nullopt;
        // ...
        Value *AShadowPtr, *AOriginPtr;
        std::tie(AShadowPtr, AOriginPtr) = getShadowOriginPtr(
            A, IRB, IRB.getInt8Ty(), Alignment, /*isStore=*/false);
        if (!PropagateShadow) {
          // ...
        }
        // ...
        if (MS.TrackOrigins) {
          Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
          // ...
        }
      } else {
        // ...
        Size = DL.getTypeAllocSize(A->getType());
        // ...
      }
      // ...
      if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
        // ... (store getOrigin(A) at
        //      getOriginPtrForArgument(IRB, ArgOffset))
      }
      // ...
      assert(Store != nullptr);
      // ...
    }
    // ...
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }
    // ...
    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }
    // ...
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base, ...);
    // ...
    setShadow(&CB, getCleanShadow(&CB));
    setOrigin(&CB, getCleanOrigin());
    // ... (assert: "Could not find insertion point for retval shadow load")
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(...);
    // ...
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  static bool isAMustTailRetVal(Value *RetVal) {
    // ...
    if (auto *I = dyn_cast<BitCastInst>(RetVal))
      RetVal = I->getOperand(0);
    if (auto *I = dyn_cast<CallInst>(RetVal))
      return I->isMustTailCall();
    return false;
  }
  void visitReturnInst(ReturnInst &I) {
    // ...
    Value *RetVal = I.getReturnValue();
    // ...
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // ...
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
      // ...
    }
    // ...
    if (MS.TrackOrigins && StoreOrigin)
      IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }
  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false,
                              GlobalValue::PrivateLinkage, IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    // ...
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore=*/true);
      // ...
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      // ... (with -msan-print-stack-names, also pass a description:)
      Value *Descr = getLocalVarDescription(I);
      IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                     {&I, Len, Idptr, Descr});
      // ... (otherwise:)
      IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack)
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    else
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    // ...
    NextNodeIRBuilder IRB(InsPoint);
    // ...
    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // ...
  }
  void visitSelectInst(SelectInst &I) {
    // ... (B = condition, C = true value, D = false value)
    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    // ...
    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;
    // ...
    if (I.getType()->isAggregateType()) {
      // ...
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else if (isScalableNonVectorType(I.getType())) {
      // ...
      Sa1 = getCleanShadow(getShadowTy(I.getType()));
    } else {
      // ...
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      // ...
    }
    // ...
    if (MS.TrackOrigins) {
      // ...
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // ...
    }
  }
  void visitLandingPadInst(LandingPadInst &I) {
    // ...
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
  void visitExtractValueInst(ExtractValueInst &I) {
    // ...
    Value *Agg = I.getAggregateOperand();
    // ...
    Value *AggShadow = getShadow(Agg);
    // ...
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    // ...
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    // ...
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    // ...
    errs() << "ZZZ " << I.getOpcodeName() << "\n";
    // ...
    errs() << "QQQ " << I << "\n";
  }
  void visitResumeInst(ResumeInst &I) {
    // ...
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    // ...
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    // ...
  }

  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // ...
    insertCheckShadowOf(Operand, &I);
    // ...
    auto Size = DL.getTypeStoreSize(ElemTy);
    // ...
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ...
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      // ...
    }
  }
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    // ...
    if (auto *ST = dyn_cast<StructType>(CB->getType()))
      NumRetOutputs = ST->getNumElements();
    // ...
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      // ...
      }
    }
    return NumOutputs - NumRetOutputs;
  }

  void visitAsmInstruction(Instruction &I) {
    // ...
    const DataLayout &DL = F.getDataLayout();
    // ...
    int OutputArgs = getNumOutputArgs(IA, CB);
    // ...
    for (int i = OutputArgs; i < NumOperands; i++) {
      // ...
    }
    // ...
    for (int i = 0; i < OutputArgs; i++) {
      // ...
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitFreezeInst(FreezeInst &I) {
    // ...
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // ...
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      // ...
      insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
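
// Variadic calls cannot use fixed TLS offsets for the unnamed arguments, so
// each target's VarArgHelper records argument shadow into __msan_va_arg_tls
// at call sites and copies it into the va_list's shadow when va_start runs.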
struct VarArgHelperBase : public VarArgHelper {
protected:
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  // ...
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    // ...
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    return IRB.CreatePtrAdd(
        MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
  }

  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    // ...
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    // ...
    return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
                            ConstantInt::get(MS.IntptrTy, ArgOffset),
                            "_msarg_va_o");
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // ...
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    // ...
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore=*/true);
    // ...
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    // ...
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
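
// System V AMD64 varargs layout: a 48-byte general-purpose register save
// area (6 registers x 8 bytes), followed by 128 bytes of FP/SSE save area
// (8 XMM registers x 16 bytes, hence the 176-byte end offset), with any
// remaining arguments in the stack overflow area. The helper below mirrors
// this layout when placing argument shadow.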
struct VarArgAMD64Helper : public VarArgHelperBase {
  // ...
  static const unsigned AMD64GpEndOffset = 48;
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // ...
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to the va_arg layout. Fixed arguments are accounted for
  // (they occupy argument registers and stack slots), but their shadow is not
  // stored.
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area. Fixed arguments
        // passed through the overflow area will be stepped over by va_start,
        // so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
            A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
        }
        // Fixed arguments are counted towards the offsets above, but their
        // shadow is never stored.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
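  // Worked example (illustrative only, values not taken from this file):
  // for a call like printf("%d %f %s", i, d, s), the fixed format string
  // consumes the first GP slot, so shadow is stored for i at GP offset 8,
  // for s at GP offset 16, and for d at FP offset 48 (the first XMM slot);
  // OverflowOffset stays at 176, so the recorded overflow size is 0.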
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr =
          IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                         VAArgOverflowSize);
      }
    }
  }
};
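// Layout note: the constants 8 and 16 above come from the SysV AMD64
// __va_list_tag layout { i32 gp_offset; i32 fp_offset;
// ptr overflow_arg_area; ptr reg_save_area }: the overflow-area pointer
// lives at byte 8 and the register-save-area pointer at byte 16.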
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of aarch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    // Unknown types fall back to the memory class.
    return {AK_Memory, 0};
  }
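  // ABI background: AAPCS64 passes the first eight integer/pointer arguments
  // in x0-x7 (8 x 8 = 64 bytes, kAArch64GrArgSize) and the first eight
  // FP/SIMD arguments in v0-v7 (8 x 16 = 128 bytes, kAArch64VrArgSize);
  // the second member of the returned pair reports how many such registers a
  // composite (array or fixed-vector) argument consumes.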
  // Serialize variadic-argument shadow in a register-class-ordered layout:
  // 64 bytes for GRs, then 128 bytes for VRs, then the stack overflow area.
  // Fixed arguments still advance the offsets so that variadic shadow lands
  // in the right slot.
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory:
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
      }
      // Count Gr/Vr fixed arguments towards their respective offsets, but
      // don't actually store a shadow for them.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // The offsets from __gr_top/__vr_top are negative for variadic
      // arguments; adding the full register-area sizes yields the amount of
      // named-register shadow to skip in the TLS copy.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally the stack overflow area.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
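// Layout note: getVAField64/getVAField32 above read the AAPCS64 va_list
// { ptr __stack; ptr __gr_top; ptr __vr_top; i32 __gr_offs; i32 __vr_offs }
// at byte offsets 0, 8, 16, 24 and 28. __gr_offs and __vr_offs are negative
// offsets from the corresponding *_top pointers, which is why the copies add
// them to GrArgSize/VrArgSize to skip the shadow of named register arguments.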
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // Stack arguments are mostly aligned to 8 bytes, but vectors are aligned
    // to their size and byval arguments to their declared alignment. The
    // parameter save area starts 32 bytes into the frame under the ELFv2 ABI
    // and 48 bytes under ELFv1.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than 8 bytes to match
          // the placement of bits on a big-endian system.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
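// ABI note: the 32/48-byte bases in visitCallBase() reflect where the
// parameter save area starts in the caller's frame: 48 bytes under ELFv1
// (back chain, CR, LR, two reserved doublewords and TOC) versus 32 bytes
// under ELFv2, which dropped the reserved doublewords.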
/// PowerPC32-specific implementation of VarArgHelper.
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase = 0;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(
              IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          // Arrays are aligned to element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = ArgTy->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than a pointer so it
          // matches the placement of bits on a big-endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
      }
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;

      // The register save area pointer is the second pointer field, 8 bytes
      // into the va_list tag.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));

      // At most 32 bytes of shadow (8 argument GPRs x 4 bytes) go to the
      // register save area; the rest belongs to the overflow area.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));

      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);

      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);

      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);

      // The FPR part of the register save area is not modeled; clean the 32
      // bytes of shadow that follow the copied GPR shadow.
      RegSaveAreaShadowPtr = IRB.CreatePtrAdd(
          RegSaveAreaShadowPtr, ConstantInt::get(MS.IntptrTy, 32));
      IRB.CreateMemSet(RegSaveAreaShadowPtr, IRB.getInt8(0),
                       ConstantInt::get(MS.IntptrTy, 32), Alignment);

      // The overflow area pointer is the first pointer field, 4 bytes in.
      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);

      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);

      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
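// Layout note: the PPC32 SVR4 va_list is a struct
// { i8 gpr; i8 fpr; i16 reserved; ptr overflow_area; ptr reg_save_area },
// which is why the code above reads the overflow-area pointer at byte 4 and
// the register-save-area pointer at byte 8, and caps the register-area copy
// at 32 bytes (8 argument GPRs x 4 bytes).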
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  ArgKind classifyArgument(Type *T) {
    // Some i128 and fp128 arguments are converted to pointers only in the
    // back end.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // Integer arguments shorter than 64 bits are passed zero- or
    // sign-extended to a full register; the shadow must be extended the
    // same way.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt)) {
      assert(!CB.paramHasAttr(ArgNo, Attribute::SExt));
      return ShadowExtension::Zero;
    }
    if (CB.paramHasAttr(ArgNo, Attribute::SExt))
      return ShadowExtension::Sign;
    return ShadowExtension::None;
  }
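  // ABI background: the s390x register save area mirrors the first 160 bytes
  // of the caller's frame: the five GP argument registers r2-r6 are saved at
  // bytes 16-56 and the four FP argument registers f0, f2, f4 and f6 at
  // bytes 128-160; everything else spills to the overflow area, which starts
  // immediately after it at byte 160.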
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex; vector varargs go through ArgKind::Memory.
        assert(IsFixed);
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset, but store shadow only for varargs.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
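// Layout note: the s390x va_list is { i64 __gpr; i64 __fpr;
// ptr __overflow_arg_area; ptr __reg_save_area }, which yields the byte
// offsets 16 (SystemZOverflowArgAreaPtrOffset) and 24
// (SystemZRegSaveAreaPtrOffset) used by the copy routines above.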
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than a pointer so it
          // matches the placement of bits on a big-endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, IntptrSize);
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
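// Design note: on i386 a va_list is just a pointer into the stack argument
// area (VAListTagSize == 4), so unlike AMD64 there is no register save area
// to reconstruct - the whole serialized shadow is copied over the stack
// arguments with a single memcpy.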
/// Generic VarArgHelper implementation shared by ARM32, MIPS, RISC-V and
/// LoongArch64.
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjust the shadow for arguments smaller than a pointer so it
        // matches the placement of bits on a big-endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
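// From this pass's point of view these targets pass all variadic arguments
// on the stack and differ only in the size of the va_list object, so a
// single generic helper suffices; the per-target VAListTagSize values are
// supplied by CreateVarArgHelper() below.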
/// A no-op implementation of VarArgHelper, used on platforms without a
/// dedicated one.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

} // end anonymous namespace
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);

  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);

  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);

  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);

  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);

  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);

  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);

  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
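// Usage sketch (hypothetical, for illustration only; the real call sites
// live in MemorySanitizerVisitor, defined earlier in this file -
// CallsInFunction and IRBAtCallSite below are made-up names):
//
//   std::unique_ptr<VarArgHelper> VAH(
//       CreateVarArgHelper(F, MSan, Visitor));
//   for (CallBase *CB : CallsInFunction)
//     VAH->visitCallBase(*CB, IRBAtCallSite);
//   VAH->finalizeInstrumentation();   // after all va_start calls are seen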
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (!CompileKernel && F.getName() == kMsanModuleCtorName)
    return false;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out the function's memory attributes: the inserted instrumentation
  // reads and writes shadow memory behind the optimizer's back.
  AttributeMask B;
  B.addAttribute(Attribute::Memory).addAttribute(Attribute::NoSync);
  F.removeFnAttrs(B);

  return Visitor.runOnFunction();
}