#define DEBUG_TYPE "tsan"
57 "tsan-instrument-memory-accesses",
cl::init(
true),
61 cl::desc(
"Instrument function entry and exit"),
64 "tsan-handle-cxx-exceptions",
cl::init(
true),
65 cl::desc(
"Handle C++ exceptions (insert cleanup blocks for unwinding)"),
72 "tsan-instrument-memintrinsics",
cl::init(
true),
75 "tsan-distinguish-volatile",
cl::init(
false),
76 cl::desc(
"Emit special instrumentation for accesses to volatiles"),
79 "tsan-instrument-read-before-write",
cl::init(
false),
80 cl::desc(
"Do not eliminate read instrumentation for read-before-writes"),
83 "tsan-compound-read-before-write",
cl::init(
false),
84 cl::desc(
"Emit special compound instrumentation for reads-before-writes"),
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
struct ThreadSanitizer {
  ThreadSanitizer() {
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
    }
  }
  struct InstructionInfo {
    // Instrumentation emitted for this instruction is for a compounded set of
    // read and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };
  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  bool doInitialization(Module &M) override;
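// Insert the TSan module constructor, which arranges for the runtime
// initializer (__tsan_init) to run before other code from this module.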
static void insertModuleCtor(Module &M) {
  Optional<ThreadSanitizer> TSan;
216 "ThreadSanitizer: detects data races.",
false,
false)
StringRef ThreadSanitizerLegacyPass::getPassName() const {
  return "ThreadSanitizerLegacyPass";
}
void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}
bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
  insertModuleCtor(M);
  TSan.emplace();
  return true;
}
bool ThreadSanitizerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TSan->sanitizeFunction(F, TLI);
  return true;
}
FunctionPass *llvm::createThreadSanitizerLegacyPassPass() {
  return new ThreadSanitizerLegacyPass();
}
void ThreadSanitizer::initialize(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());
  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                           Attribute::NoUnwind);
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());
    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());
    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
298 "__tsan_unaligned_volatile_write" + ByteSizeStr);
299 TsanUnalignedVolatileWrite[
i] =
M.getOrInsertFunction(
300 UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName, Attr, Ty, PtrTy, OrdTy);
    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
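    // One callback per atomicrmw operation; operations without a runtime
    // counterpart keep a null entry and are left uninstrumented.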
    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[Op][i] =
          M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy);
    }
347 "_compare_exchange_val");
348 TsanAtomicCAS[
i] =
M.getOrInsertFunction(AtomicCASName, Attr, Ty, PtrTy, Ty,
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);
  TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);
  MemmoveFn =
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn =
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}
static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}
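// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.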
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
    // Check if the global is private gcov data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }
  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}
bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}
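// Instrumenting some of the accesses may be proven redundant; the cases
// handled here are reads-before-writes within the same basic block, accesses
// to constant data, and allocas that never escape the current thread.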
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local,
    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();
    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and if either the read or write
        // is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // We will write to this temp, so no reason to analyze the read.
          // Mark the write instruction as compound.
          WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any
        // writes.
        continue;
      }
    }
    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.emplace_back(I);
    if (IsWrite) {
      // For read-before-write and compound instrumentation we only need one
      // write target, and we can override any previous entry if it exists.
      WriteTargets[Addr] = All.size() - 1;
    }
  }
  Local.clear();
}
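// Classifies instructions with cross-thread ordering semantics that the
// runtime must model: atomic loads/stores, read-modify-writes, compare
// exchanges, and fences.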
static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}
void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}
bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // Naked functions can not have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();
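  // Traverse all instructions, collect loads/stores/returns, check for calls.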
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }
  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(II, DL);
    }
  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }
  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }
  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }
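  // Instrument function entry/exit points if there were instrumented accesses.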
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}
bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
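  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.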
  if (Addr->isSwiftError())
    return false;
  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
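  // Vtable pointer updates get dedicated instrumentation. StoredValue may be a
  // vector type if we are storing several vptrs at once; in that case just
  // take the first element of the vector, which is enough to find vptr races.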
  if (IsWrite && isVtableAccess(II.Inst)) {
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
                                     : cast<LoadInst>(II.Inst)->getAlignment();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
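    // Aligned access: dispatch to the compound, volatile, or plain callback.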
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}
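// Memset/memcpy/memmove intrinsics are not instrumented directly; instead they
// are replaced with plain libc calls, which the TSan runtime intercepts. The
// original intrinsic is erased.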
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), CmpOperand, NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }
    Value *Res =
        IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);
    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}
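// Maps an access to the index of the matching size-specific callback: 0 for
// 1-byte accesses up through 4 for 16-byte accesses, or -1 for unusual sizes
// that are not instrumented.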
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 && TypeSize != 64 &&
      TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}