//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

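// Overview (summary of the code below): this pass implements stack tagging
// for the AArch64 Memory Tagging Extension (MTE). For every "interesting"
// static alloca it:
//   * aligns and pads the allocation to 16-byte tag granules,
//   * materializes a per-function base tagged pointer with
//     llvm.aarch64.irg.sp,
//   * rewrites users of the alloca to a tagged pointer produced by
//     llvm.aarch64.tagp, cycling through the 16 logical tag values,
//   * sets memory tags with llvm.aarch64.settag / settag.zero / stgp at the
//     start of the variable's lifetime, optionally folding constant
//     initializers into the tagging stores, and
//   * clears the tags again at lifetime ends or at function returns.
//
// Illustrative IR sketch (hand-written and simplified, not literal output of
// the pass):
//   %base = call ptr @llvm.aarch64.irg.sp(i64 0)
//   %x = alloca i32, align 16                       ; padded to one granule
//   %x.tag = call ptr @llvm.aarch64.tagp.p0(ptr %x, ptr %base, i64 0)
//   call void @llvm.aarch64.settag(ptr %x.tag, i64 16)
//   ; ... uses of %x are rewritten to use %x.tag ...
//   call void @llvm.aarch64.settag(ptr %x, i64 16)  ; retag before return
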
10 #include "AArch64.h"
11 #include "AArch64InstrInfo.h"
12 #include "AArch64Subtarget.h"
13 #include "AArch64TargetMachine.h"
14 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/MapVector.h"
17 #include "llvm/ADT/None.h"
18 #include "llvm/ADT/Optional.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/CFG.h"
23 #include "llvm/Analysis/LoopInfo.h"
40 #include "llvm/IR/DebugLoc.h"
41 #include "llvm/IR/Dominators.h"
42 #include "llvm/IR/Function.h"
44 #include "llvm/IR/IRBuilder.h"
45 #include "llvm/IR/InstIterator.h"
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/IR/Instructions.h"
48 #include "llvm/IR/IntrinsicInst.h"
49 #include "llvm/IR/IntrinsicsAArch64.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/ValueHandle.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/Debug.h"
59 #include <cassert>
60 #include <iterator>
61 #include <memory>
62 #include <utility>
63 
using namespace llvm;

#define DEBUG_TYPE "aarch64-stack-tagging"

static cl::opt<bool> ClMergeInit(
    "stack-tagging-merge-init", cl::Hidden, cl::init(true),
    cl::desc("merge stack variable initializers with tagging when possible"));

static cl::opt<bool>
    ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
                     cl::init(true),
                     cl::desc("Use Stack Safety analysis results"));

static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
                                     cl::init(40), cl::Hidden);

static cl::opt<unsigned>
    ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272),
                         cl::Hidden);

static cl::opt<size_t> ClMaxLifetimes(
    "stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
    cl::ReallyHidden,
    cl::desc("How many lifetime ends to handle for a single alloca."),
    cl::Optional);

static const Align kTagGranuleSize = Align(16);

namespace {

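// Helper that merges constant stores / memsets into the tag-setting
// instructions for a single alloca: it records non-overlapping initializer
// ranges, packs them into 8-byte words, and then emits settag.zero for
// all-zero regions, settag for undef regions, and stgp (store pair with tag)
// for 16-byte chunks that carry data. The merged stores/memsets are erased.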
class InitializerBuilder {
  uint64_t Size;
  const DataLayout *DL;
  Value *BasePtr;
  Function *SetTagFn;
  Function *SetTagZeroFn;
  Function *StgpFn;

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End;
    Instruction *Inst;
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    auto I =
        llvm::lower_bound(Ranges, Start, [](const Range &LHS, uint64_t RHS) {
          return LHS.End <= RHS;
        });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap - bail.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }

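  // Record a memset into Out[]. The memset byte V is splatted across each
  // affected 8-byte word by multiplying with 0x0101010101010101; partial
  // words at the edges are handled by masking off the bytes outside
  // [Start, End). For example, a memset of 0x2a over bytes [3, 8) of a word
  // contributes 0x2a2a2a2a2a000000 (little-endian byte order).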
  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do for memset(0).
    if (V->isZero())
      return;
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits;
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits;
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }

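  // Record a plain store into Out[]. The stored value is first flattened to
  // an integer, then split into the 8-byte words it touches; a store that is
  // not 8-byte aligned contributes shifted slices to two adjacent words.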
  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }

  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Look through the 8-byte initializer list 16 bytes at a time;
    // if one of the two 8-byte halves is non-zero and non-undef, emit STGP.
    // Otherwise, emit zeroes up to the next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zero.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << "    " << *A << "\n    " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }

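  // Convert an arbitrary stored value (integer, pointer, or vector thereof)
  // into an integer of the same store size so it can be sliced into the
  // 8-byte words used above.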
  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        auto *NewTy = FixedVectorType::get(
            IntegerType::get(Ctx, EltSize),
            cast<FixedVectorType>(VecTy)->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};

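// Legacy pass manager wrapper that drives the transformation. MergeInit and
// UseStackSafety follow their command-line options when given explicitly, and
// otherwise default to enabled unless the pass was created for -O0
// (IsOptNone).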
class AArch64StackTagging : public FunctionPass {
  const bool MergeInit;
  const bool UseStackSafety;

public:
  static char ID; // Pass ID, replacement for typeid

  AArch64StackTagging(bool IsOptNone = false)
      : FunctionPass(ID),
        MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
        UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
                                                            : !IsOptNone) {
    initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
  }

  bool isInterestingAlloca(const AllocaInst &AI);

  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
                 uint64_t Size);
  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);

  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
                                   uint64_t Size, InitializerBuilder &IB);

  Instruction *insertBaseTaggedPointer(
      const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
      const DominatorTree *DT);
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }

private:
  Function *F = nullptr;
  Function *SetTagFunc = nullptr;
  const DataLayout *DL = nullptr;
  AAResults *AA = nullptr;
  const StackSafetyGlobalInfo *SSI = nullptr;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    if (UseStackSafety)
      AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
    if (MergeInit)
      AU.addRequired<AAResultsWrapperPass>();
  }
};

} // end anonymous namespace

char AArch64StackTagging::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                    false, false)

FunctionPass *llvm::createAArch64StackTaggingPass(bool IsOptNone) {
  return new AArch64StackTagging(IsOptNone);
}

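// Scan forward from StartInst for simple stores and constant-length memsets
// that write at a constant offset within the alloca, feeding them to the
// InitializerBuilder. Scanning stops at the scan limit, at anything that may
// read or clobber the memory, or at an initializer that cannot be merged.
// Returns the last merged instruction, which becomes the insertion point for
// the generated tagging code.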
Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
                                                      Value *StartPtr,
                                                      uint64_t Size,
                                                      InitializerBuilder &IB) {
  MemoryLocation AllocaLoc{StartPtr, Size};
  Instruction *LastInst = StartInst;
  BasicBlock::iterator BI(StartInst);

  unsigned Count = 0;
  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
    if (!isa<DbgInfoIntrinsic>(*BI))
      ++Count;

    if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
      continue;

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      if (!NextStore->isSimple())
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
      if (!Offset)
        break;

      if (!IB.addStore(*Offset, NextStore, DL))
        break;
      LastInst = NextStore;
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
        break;

      if (!isa<ConstantInt>(MSI->getValue()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
      if (!Offset)
        break;

      if (!IB.addMemSet(*Offset, MSI))
        break;
      LastInst = MSI;
    }
  }
  return LastInst;
}

bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
  // FIXME: support dynamic allocas
  bool IsInteresting =
      AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
      // alloca() may be called with 0 size, ignore it.
      *AI.getAllocationSizeInBits(*DL) > 0 &&
      // inalloca allocas are not treated as static, and we don't want
      // dynamic alloca instrumentation for them as well.
      !AI.isUsedWithInAlloca() &&
      // swifterror allocas are register promoted by ISel
      !AI.isSwiftError() &&
      // safe allocas are not interesting
      !(SSI && SSI->isSafe(AI));
  return IsInteresting;
}

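// Emit the tag-setting code for one alloca, covering [Ptr, Ptr + Size). When
// initializer merging is enabled (little-endian targets only, and below the
// size limit), nearby constant stores and memsets are folded into the emitted
// settag / settag.zero / stgp sequence instead of being kept as separate
// stores.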
void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                    Value *Ptr, uint64_t Size) {
  auto SetTagZeroFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
  auto StgpFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);

  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
  bool LittleEndian =
      Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
  // Current implementation of initializer merging assumes little endianness.
  if (MergeInit && !F->hasOptNone() && LittleEndian &&
      Size < ClMergeInitSizeLimit) {
    LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
                      << ", size = " << Size << "\n");
    InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
  }

  IRBuilder<> IRB(InsertBefore);
  IB.generate(IRB);
}

void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                      uint64_t Size) {
  IRBuilder<> IRB(InsertBefore);
  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
                              ConstantInt::get(IRB.getInt64Ty(), Size)});
}

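// Insert the llvm.aarch64.irg.sp call that produces the per-function base
// tagged pointer. It is placed at the front of the nearest common dominator
// of all instrumented allocas rather than unconditionally in the entry block,
// so that shrink wrapping is hurt as little as possible.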
Instruction *AArch64StackTagging::insertBaseTaggedPointer(
    const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
    const DominatorTree *DT) {
  BasicBlock *PrologueBB = nullptr;
  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
  for (auto &I : AllocasToInstrument) {
    const memtag::AllocaInfo &Info = I.second;
    AllocaInst *AI = Info.AI;
    if (!PrologueBB) {
      PrologueBB = AI->getParent();
      continue;
    }
    PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
  }
  assert(PrologueBB);

  IRBuilder<> IRB(&PrologueBB->front());
  Function *IRG_SP =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
  Instruction *Base =
      IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
  Base->setName("basetag");
  return Base;
}

// FIXME: check for MTE extension
bool AArch64StackTagging::runOnFunction(Function &Fn) {
  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
    return false;

  if (UseStackSafety)
    SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
  F = &Fn;
  DL = &Fn.getParent()->getDataLayout();
  if (MergeInit)
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  memtag::StackInfoBuilder SIB(
      [this](const AllocaInst &AI) { return isInterestingAlloca(AI); });
  for (Instruction &I : instructions(F))
    SIB.visit(I);
  memtag::StackInfo &SInfo = SIB.get();

  if (SInfo.AllocasToInstrument.empty())
    return false;

  std::unique_ptr<DominatorTree> DeleteDT;
  DominatorTree *DT = nullptr;
  if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &P->getDomTree();

  if (DT == nullptr) {
    DeleteDT = std::make_unique<DominatorTree>(*F);
    DT = DeleteDT.get();
  }

  std::unique_ptr<PostDominatorTree> DeletePDT;
  PostDominatorTree *PDT = nullptr;
  if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
    PDT = &P->getPostDomTree();

  if (PDT == nullptr) {
    DeletePDT = std::make_unique<PostDominatorTree>(*F);
    PDT = DeletePDT.get();
  }

  std::unique_ptr<LoopInfo> DeleteLI;
  LoopInfo *LI = nullptr;
  if (auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>()) {
    LI = &LIWP->getLoopInfo();
  } else {
    DeleteLI = std::make_unique<LoopInfo>(*DT);
    LI = DeleteLI.get();
  }

  SetTagFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);

  Instruction *Base = insertBaseTaggedPointer(SInfo.AllocasToInstrument, DT);

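  // Instrument each alloca: rotate through the 16 possible logical tag values,
  // rewrite users to the llvm.aarch64.tagp-derived pointer, and set/clear the
  // memory tags at lifetime boundaries (or, for non-standard lifetimes, right
  // after the alloca and at every return).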
  int NextTag = 0;
  for (auto &I : SInfo.AllocasToInstrument) {
    memtag::AllocaInfo &Info = I.second;
    assert(Info.AI && isInterestingAlloca(*Info.AI));
    TrackingVH<Instruction> OldAI = Info.AI;
    memtag::alignAndPadAlloca(Info, kTagGranuleSize);
    AllocaInst *AI = Info.AI;
    int Tag = NextTag;
    NextTag = (NextTag + 1) % 16;
    // Replace alloca with tagp(alloca).
    IRBuilder<> IRB(Info.AI->getNextNode());
    Function *TagP = Intrinsic::getDeclaration(
        F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
    Instruction *TagPCall =
        IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
                              ConstantInt::get(IRB.getInt64Ty(), Tag)});
    if (Info.AI->hasName())
      TagPCall->setName(Info.AI->getName() + ".tag");
    Info.AI->replaceAllUsesWith(TagPCall);
    TagPCall->setOperand(0, Info.AI);

    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    bool StandardLifetime =
        SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT, LI,
                                   ClMaxLifetimes) &&
        !SInfo.CallsReturnTwice;
    if (StandardLifetime) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      uint64_t Size =
          cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
      Size = alignTo(Size, kTagGranuleSize);
      tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);

      auto TagEnd = [&](Instruction *Node) { untagAlloca(AI, Node, Size); };
      if (!DT || !PDT ||
          !memtag::forAllReachableExits(*DT, *PDT, *LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      uint64_t Size = *Info.AI->getAllocationSizeInBits(*DL) / 8;
      Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
      tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
      for (auto *RI : SInfo.RetVec) {
        untagAlloca(AI, RI, Size);
      }
      // We may have inserted tag/untag outside of any lifetime interval.
      // Remove all lifetime intrinsics for this alloca.
      for (auto *II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto *II : Info.LifetimeEnd)
        II->eraseFromParent();
    }

    // Fixup debug intrinsics to point to the new alloca.
    for (auto *DVI : Info.DbgVariableIntrinsics)
      DVI->replaceVariableLocationOp(OldAI, Info.AI);
  }

  // If we have instrumented at least one alloca, all unrecognized lifetime
  // intrinsics have to go.
  for (auto *I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();

  return true;
}