#define DEBUG_TYPE "tsan"
57 "tsan-instrument-memory-accesses",
cl::init(
true),
61 cl::desc(
"Instrument function entry and exit"),
64 "tsan-handle-cxx-exceptions",
cl::init(
true),
65 cl::desc(
"Handle C++ exceptions (insert cleanup blocks for unwinding)"),
72 "tsan-instrument-memintrinsics",
cl::init(
true),
75 "tsan-distinguish-volatile",
cl::init(
false),
76 cl::desc(
"Emit special instrumentation for accesses to volatiles"),
79 "tsan-instrument-read-before-write",
cl::init(
false),
80 cl::desc(
"Do not eliminate read instrumentation for read-before-writes"),
83 "tsan-compound-read-before-write",
cl::init(
false),
84 cl::desc(
"Emit special compound instrumentation for reads-before-writes"),
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer {
  ThreadSanitizer() {
    // Check options and warn user.
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite)
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
  }
  // Instruction wrapper carrying extra information from prior analysis.
  struct InstructionInfo {
    // Instrumentation for this instruction covers a compounded set of read
    // and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };
  void initialize(Module &M);
  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<InstructionInfo> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);
};
void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}
PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}
void ThreadSanitizer::initialize(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());

  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addFnAttribute(M.getContext(), Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    {
      AttributeList AL = Attr;
      AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
      TsanAtomicLoad[i] =
          M.getOrInsertFunction(AtomicLoadName, AL, Ty, PtrTy, OrdTy);
    }
    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    {
      AttributeList AL = Attr;
      AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
      AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
      TsanAtomicStore[i] = M.getOrInsertFunction(
          AtomicStoreName, AL, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
    }
    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      {
        AttributeList AL = Attr;
        AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
        AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
        TsanAtomicRMW[Op][i] =
            M.getOrInsertFunction(RMWName, AL, Ty, PtrTy, Ty, OrdTy);
      }
    }
314 "_compare_exchange_val");
317 AL =
AL.addParamAttribute(
M.getContext(), 1, Attribute::ZExt);
318 AL =
AL.addParamAttribute(
M.getContext(), 2, Attribute::ZExt);
319 AL =
AL.addParamAttribute(
M.getContext(), 3, Attribute::ZExt);
320 AL =
AL.addParamAttribute(
M.getContext(), 4, Attribute::ZExt);
321 TsanAtomicCAS[
i] =
M.getOrInsertFunction(AtomicCASName,
AL, Ty, PtrTy, Ty,
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  {
    AttributeList AL = Attr;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
                                                  AL, IRB.getVoidTy(), OrdTy);
  }
  {
    AttributeList AL = Attr;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
                                                  AL, IRB.getVoidTy(), OrdTy);
  }
  MemmoveFn =
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn =
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}
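// The callback names registered above follow a fixed scheme: "__tsan_"
// + optional "unaligned_" / "volatile_" qualifiers + access kind + byte size
// (e.g. __tsan_read4, __tsan_unaligned_write8, __tsan_volatile_read2), and
// "__tsan_atomic" + bit size + operation (e.g. __tsan_atomic32_fetch_add)
// for the atomic callbacks.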
static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts. Races on compiler-emitted counters (PGO,
  // gcov) are benign and the user has no way to suppress them.
  Addr = Addr->stripInBoundsOffsets();
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      auto OF = Triple(M->getTargetTriple()).getObjectFormat();
      if (GV->getSection().endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }
  // Do not instrument accesses from different address spaces.
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return false;
  return true;
}
bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from the vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}
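// For example, a load from a string literal's constant global can never race
// with a write, since a conforming program has no way to store to it.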
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local,
    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All.
  // Iterate from the end to find reads that precede writes to the same addr.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();
    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
      continue;
    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and either the read or the
        // write is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // The following write covers this read; mark the write as compound.
          WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }
      // Addr points to constant data -- it cannot race with any writes.
      if (addrPointsToConstantData(Addr))
        continue;
    }
    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race.
      NumOmittedNonCaptured++;
      continue;
    }
    All.emplace_back(I);
    if (IsWrite)
      WriteTargets[Addr] = All.size() - 1;
  }
  Local.clear();
}
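// Illustrative IR (not from this file): with -tsan-compound-read-before-write,
// a read followed by a write to the same address in one basic block, e.g.
//   %v = load i32, i32* %p
//   store i32 %w, i32* %p
// is instrumented with a single __tsan_read_write4(%p) call instead of
// separate __tsan_read4 and __tsan_write4 calls.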
static bool isTsanAtomic(const Instruction *I) {
  auto SSID = getAtomicSyncScopeID(I);
  if (!SSID.hasValue())
    return false;
  if (isa<LoadInst>(I) || isa<StoreInst>(I))
    return SSID.getValue() != SyncScope::SingleThread;
  return true;
}
void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next())
    AtExit->CreateCall(TsanIgnoreEnd);
}
bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions can not have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  // __attribute__((disable_sanitizer_instrumentation)) prevents all kinds of
  // instrumentation.
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  initialize(*F.getParent());
  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
  SmallVector<Instruction *, 8> LocalLoadsAndStores;
  SmallVector<Instruction *, 8> AtomicAccesses;
  SmallVector<Instruction *, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isTsanAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if ((isa<CallInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst)) ||
               isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores)
      Res |= instrumentLoadOrStore(II, DL);

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses)
      Res |= instrumentAtomic(Inst, DL);

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls)
      Res |= instrumentMemIntrinsic(Inst);

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next())
      AtExit->CreateCall(TsanFuncExit, {});
    Res = true;
  }
  return Res;
}
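// Note: __tsan_func_entry is passed the caller's return address so that the
// runtime can maintain a shadow call stack for reports (an inference from the
// call above, not spelled out in this file).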
bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
  Type *OrigTy = getLoadStoreType(II.Inst);

  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function, and it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at
    // once. In this case, just take the first element of the vector since
    // this is enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }

  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
                                     : cast<LoadInst>(II.Inst)->getAlignment();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc,
                 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}
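// Summary of the callback selection above: sufficiently aligned accesses take
// __tsan_read{N}/__tsan_write{N} (or their volatile/compound variants); all
// others take the __tsan_unaligned_* counterparts, where N is the access size
// in bytes.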
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}
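// Replacing the intrinsics with plain libc calls is deliberate: the TSan
// runtime intercepts memset/memcpy/memmove, so lowering to real calls lets
// the interceptors observe and check these accesses.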
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    Type *OrigTy = LI->getType();
    int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    ReplaceInstWithInst(I, CallInst::Create(TsanAtomicStore[Idx], Args));
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    ReplaceInstWithInst(I, CallInst::Create(F, Args));
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), CmpOperand, NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }
    Value *Res =
        IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);
    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    ReplaceInstWithInst(I, CallInst::Create(F, Args));
  }
  return true;
}
int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
                                              const DataLayout &DL) {
  assert(OrigTy->isSized());
  assert(
      cast<PointerType>(Addr->getType())->isOpaqueOrPointeeTypeMatches(OrigTy));
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 && TypeSize != 64 &&
      TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}
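// Resulting index mapping: 1 byte -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4,
// i.e. Idx = log2(ByteSize), which indexes the TsanRead/TsanWrite/TsanAtomic*
// callback arrays (a 4-byte access dispatches through index 2 to
// __tsan_read4/__tsan_write4).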