LLVM  13.0.0git
AArch64StackTagging.cpp
Go to the documentation of this file.
1 //===- AArch64StackTagging.cpp - Stack tagging in IR --===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //===----------------------------------------------------------------------===//
10 
11 #include "AArch64.h"
12 #include "AArch64InstrInfo.h"
13 #include "AArch64Subtarget.h"
14 #include "AArch64TargetMachine.h"
15 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/IR/DebugLoc.h"
42 #include "llvm/IR/Dominators.h"
43 #include "llvm/IR/Function.h"
45 #include "llvm/IR/Instruction.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/IntrinsicInst.h"
48 #include "llvm/IR/IntrinsicsAArch64.h"
49 #include "llvm/IR/IRBuilder.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/Debug.h"
57 #include <cassert>
58 #include <iterator>
59 #include <utility>
60 
61 using namespace llvm;
62 
63 #define DEBUG_TYPE "aarch64-stack-tagging"
64 
66  "stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore,
67  cl::desc("merge stack variable initializers with tagging when possible"));
68 
69 static cl::opt<bool>
70  ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
71  cl::init(true), cl::ZeroOrMore,
72  cl::desc("Use Stack Safety analysis results"));
73 
74 static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
75  cl::init(40), cl::Hidden);
76 
77 static cl::opt<unsigned>
78  ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272),
79  cl::Hidden);
80 
81 static const Align kTagGranuleSize = Align(16);
82 
83 namespace {
84 
// Builds the combined initializer + tagging code for one alloca. Individual
// stores/memsets that initialize the alloca are folded into 8-byte slots
// (Out[]); generate() then emits STGP (store-pair-and-tag) for initialized
// 16-byte granules and settag/settag_zero for the rest.
// NOTE(review): the slot-merging below assumes little-endian byte order; the
// caller only enables merging for little-endian targets.
class InitializerBuilder {
  uint64_t Size;          // Allocation size in bytes (granule-aligned).
  const DataLayout *DL;
  Value *BasePtr;         // Tagged pointer to the start of the alloca.
  Function *SetTagFn;     // llvm.aarch64.settag
  Function *SetTagZeroFn; // llvm.aarch64.settag.zero
  Function *StgpFn;       // llvm.aarch64.stgp

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End; // Byte range [Start, End) within the alloca.
    Instruction *Inst;   // The original store/memset; erased by generate().
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  // Record that Inst initializes bytes [Start, End). Returns false (and
  // records nothing) if the range overlaps a previously added one.
  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    // Find the first recorded range that ends after Start.
    auto I =
        llvm::lower_bound(Ranges, Start, [](const Range &LHS, uint64_t RHS) {
          return LHS.End <= RHS;
        });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap - bail.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  // Fold a plain store at the given byte Offset into the combined
  // initializer. Returns false if it overlaps an earlier initializer.
  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  // Fold a constant-length, constant-value memset into the combined
  // initializer. Returns false if it overlaps an earlier initializer.
  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }

  // Splat the memset byte V across every 8-byte slot covered by [Start, End),
  // masking off the bytes outside the range at both edges.
  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do for memset(0).
    if (V->isZero())
      return;
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      // Zero out bytes of the splat mask that fall before Start...
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits;
      // ...and after End (little-endian byte order assumed).
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits;
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      // Non-overlapping initializers touch disjoint bytes, so OR-ing into an
      // existing slot value is a safe merge.
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }

  // Distribute StoredValue (any first-class type; see flatten) over the
  // 8-byte slots covering [Start, End).
  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      // OR with any existing slot value; initializers never overlap, so the
      // merged bytes are disjoint.
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }

  // Emit the tagging (and merged initialization) code at IRB's insertion
  // point, then erase the original initializer instructions.
  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Look through 8-byte initializer list 16 bytes at a time;
    // If one of the two 8-byte halves is non-zero non-undef, emit STGP.
    // Otherwise, emit zeroes up to next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zero.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    // The merged initializers are now dead; the emitted STGP/settag calls
    // replace them.
    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

  // Tag [Offset, Offset+Size) and zero-initialize it (settag_zero).
  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  // Tag [Offset, Offset+Size) leaving the contents undefined (settag).
  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  // Tag one 16-byte granule at Offset and store the two 8-byte values A and B
  // into it with a single STGP.
  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << "    " << *A << "\n    " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }

  // Convert a stored value of any first-class type to an integer of the same
  // store size, so it can be sliced into 8-byte chunks.
  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        auto *NewTy = FixedVectorType::get(
            IntegerType::get(Ctx, EltSize),
            cast<FixedVectorType>(VecTy)->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};
284 
285 class AArch64StackTagging : public FunctionPass {
286  struct AllocaInfo {
287  AllocaInst *AI;
288  TrackingVH<Instruction> OldAI; // Track through RAUW to replace debug uses.
289  SmallVector<IntrinsicInst *, 2> LifetimeStart;
291  SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
292  int Tag; // -1 for non-tagged allocations
293  };
294 
295  const bool MergeInit;
296  const bool UseStackSafety;
297 
298 public:
299  static char ID; // Pass ID, replacement for typeid
300 
301  AArch64StackTagging(bool IsOptNone = false)
302  : FunctionPass(ID),
303  MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
304  UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
305  : !IsOptNone) {
307  }
308 
309  bool isInterestingAlloca(const AllocaInst &AI);
310  void alignAndPadAlloca(AllocaInfo &Info);
311 
312  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
313  uint64_t Size);
314  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);
315 
316  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
317  uint64_t Size, InitializerBuilder &IB);
318 
319  Instruction *
320  insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
321  const DominatorTree *DT);
322  bool runOnFunction(Function &F) override;
323 
324  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }
325 
326 private:
327  Function *F = nullptr;
328  Function *SetTagFunc = nullptr;
329  const DataLayout *DL = nullptr;
330  AAResults *AA = nullptr;
331  const StackSafetyGlobalInfo *SSI = nullptr;
332 
333  void getAnalysisUsage(AnalysisUsage &AU) const override {
334  AU.setPreservesCFG();
335  if (UseStackSafety)
337  if (MergeInit)
339  }
340 };
341 
342 } // end anonymous namespace
343 
344 char AArch64StackTagging::ID = 0;
345 
346 INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
347  false, false)
350 INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
352 
354  return new AArch64StackTagging(IsOptNone);
355 }
356 
357 Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
358  Value *StartPtr,
359  uint64_t Size,
360  InitializerBuilder &IB) {
361  MemoryLocation AllocaLoc{StartPtr, Size};
362  Instruction *LastInst = StartInst;
363  BasicBlock::iterator BI(StartInst);
364 
365  unsigned Count = 0;
366  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
367  if (!isa<DbgInfoIntrinsic>(*BI))
368  ++Count;
369 
370  if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
371  continue;
372 
373  if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
374  // If the instruction is readnone, ignore it, otherwise bail out. We
375  // don't even allow readonly here because we don't want something like:
376  // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
377  if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
378  break;
379  continue;
380  }
381 
382  if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
383  if (!NextStore->isSimple())
384  break;
385 
386  // Check to see if this store is to a constant offset from the start ptr.
388  isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
389  if (!Offset)
390  break;
391 
392  if (!IB.addStore(*Offset, NextStore, DL))
393  break;
394  LastInst = NextStore;
395  } else {
396  MemSetInst *MSI = cast<MemSetInst>(BI);
397 
398  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
399  break;
400 
401  if (!isa<ConstantInt>(MSI->getValue()))
402  break;
403 
404  // Check to see if this store is to a constant offset from the start ptr.
405  Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
406  if (!Offset)
407  break;
408 
409  if (!IB.addMemSet(*Offset, MSI))
410  break;
411  LastInst = MSI;
412  }
413  }
414  return LastInst;
415 }
416 
417 bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
418  // FIXME: support dynamic allocas
419  bool IsInteresting =
420  AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
421  // alloca() may be called with 0 size, ignore it.
422  AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
423  // inalloca allocas are not treated as static, and we don't want
424  // dynamic alloca instrumentation for them as well.
425  !AI.isUsedWithInAlloca() &&
426  // swifterror allocas are register promoted by ISel
427  !AI.isSwiftError() &&
428  // safe allocas are not interesting
429  !(SSI && SSI->isSafe(AI));
430  return IsInteresting;
431 }
432 
433 void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
434  Value *Ptr, uint64_t Size) {
435  auto SetTagZeroFunc =
436  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
437  auto StgpFunc =
438  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);
439 
440  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
441  bool LittleEndian =
443  // Current implementation of initializer merging assumes little endianness.
444  if (MergeInit && !F->hasOptNone() && LittleEndian &&
446  LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
447  << ", size = " << Size << "\n");
448  InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
449  }
450 
451  IRBuilder<> IRB(InsertBefore);
452  IB.generate(IRB);
453 }
454 
455 void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
456  uint64_t Size) {
457  IRBuilder<> IRB(InsertBefore);
458  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
459  ConstantInt::get(IRB.getInt64Ty(), Size)});
460 }
461 
462 Instruction *AArch64StackTagging::insertBaseTaggedPointer(
464  const DominatorTree *DT) {
465  BasicBlock *PrologueBB = nullptr;
466  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
467  for (auto &I : Allocas) {
468  const AllocaInfo &Info = I.second;
469  AllocaInst *AI = Info.AI;
470  if (Info.Tag < 0)
471  continue;
472  if (!PrologueBB) {
473  PrologueBB = AI->getParent();
474  continue;
475  }
476  PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
477  }
478  assert(PrologueBB);
479 
480  IRBuilder<> IRB(&PrologueBB->front());
481  Function *IRG_SP =
482  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
483  Instruction *Base =
484  IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
485  Base->setName("basetag");
486  return Base;
487 }
488 
489 void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
490  const Align NewAlignment =
491  max(MaybeAlign(Info.AI->getAlignment()), kTagGranuleSize);
492  Info.AI->setAlignment(NewAlignment);
493 
494  uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
495  uint64_t AlignedSize = alignTo(Size, kTagGranuleSize);
496  if (Size == AlignedSize)
497  return;
498 
499  // Add padding to the alloca.
500  Type *AllocatedType =
501  Info.AI->isArrayAllocation()
502  ? ArrayType::get(
503  Info.AI->getAllocatedType(),
504  cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
505  : Info.AI->getAllocatedType();
506  Type *PaddingType =
507  ArrayType::get(Type::getInt8Ty(F->getContext()), AlignedSize - Size);
508  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
509  auto *NewAI = new AllocaInst(
510  TypeWithPadding, Info.AI->getType()->getAddressSpace(), nullptr, "", Info.AI);
511  NewAI->takeName(Info.AI);
512  NewAI->setAlignment(Info.AI->getAlign());
513  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
514  NewAI->setSwiftError(Info.AI->isSwiftError());
515  NewAI->copyMetadata(*Info.AI);
516 
517  auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
518  Info.AI->replaceAllUsesWith(NewPtr);
519  Info.AI->eraseFromParent();
520  Info.AI = NewAI;
521 }
522 
523 // Helper function to check for post-dominance.
524 static bool postDominates(const PostDominatorTree *PDT, const IntrinsicInst *A,
525  const IntrinsicInst *B) {
526  const BasicBlock *ABB = A->getParent();
527  const BasicBlock *BBB = B->getParent();
528 
529  if (ABB != BBB)
530  return PDT->dominates(ABB, BBB);
531 
532  for (const Instruction &I : *ABB) {
533  if (&I == B)
534  return true;
535  if (&I == A)
536  return false;
537  }
538  llvm_unreachable("Corrupt instruction list");
539 }
540 
541 // FIXME: check for MTE extension
543  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
544  return false;
545 
546  if (UseStackSafety)
547  SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
548  F = &Fn;
549  DL = &Fn.getParent()->getDataLayout();
550  if (MergeInit)
551  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
552 
553  MapVector<AllocaInst *, AllocaInfo> Allocas; // need stable iteration order
555  SmallVector<Instruction *, 4> UnrecognizedLifetimes;
556 
557  for (auto &BB : *F) {
558  for (BasicBlock::iterator IT = BB.begin(); IT != BB.end(); ++IT) {
559  Instruction *I = &*IT;
560  if (auto *AI = dyn_cast<AllocaInst>(I)) {
561  Allocas[AI].AI = AI;
562  Allocas[AI].OldAI = AI;
563  continue;
564  }
565 
566  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(I)) {
567  for (Value *V : DVI->location_ops())
568  if (auto *AI = dyn_cast_or_null<AllocaInst>(V))
569  Allocas[AI].DbgVariableIntrinsics.push_back(DVI);
570  continue;
571  }
572 
573  auto *II = dyn_cast<IntrinsicInst>(I);
574  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
575  II->getIntrinsicID() == Intrinsic::lifetime_end)) {
576  AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
577  if (!AI) {
578  UnrecognizedLifetimes.push_back(I);
579  continue;
580  }
581  if (II->getIntrinsicID() == Intrinsic::lifetime_start)
582  Allocas[AI].LifetimeStart.push_back(II);
583  else
584  Allocas[AI].LifetimeEnd.push_back(II);
585  }
586 
587  if (isa<ReturnInst>(I) || isa<ResumeInst>(I) || isa<CleanupReturnInst>(I))
588  RetVec.push_back(I);
589  }
590  }
591 
592  if (Allocas.empty())
593  return false;
594 
595  int NextTag = 0;
596  int NumInterestingAllocas = 0;
597  for (auto &I : Allocas) {
598  AllocaInfo &Info = I.second;
599  assert(Info.AI);
600 
601  if (!isInterestingAlloca(*Info.AI)) {
602  Info.Tag = -1;
603  continue;
604  }
605 
606  alignAndPadAlloca(Info);
607  NumInterestingAllocas++;
608  Info.Tag = NextTag;
609  NextTag = (NextTag + 1) % 16;
610  }
611 
612  if (NumInterestingAllocas == 0)
613  return true;
614 
615  std::unique_ptr<DominatorTree> DeleteDT;
616  DominatorTree *DT = nullptr;
617  if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
618  DT = &P->getDomTree();
619 
620  if (DT == nullptr && (NumInterestingAllocas > 1 ||
621  !F->hasFnAttribute(Attribute::OptimizeNone))) {
622  DeleteDT = std::make_unique<DominatorTree>(*F);
623  DT = DeleteDT.get();
624  }
625 
626  std::unique_ptr<PostDominatorTree> DeletePDT;
627  PostDominatorTree *PDT = nullptr;
628  if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
629  PDT = &P->getPostDomTree();
630 
631  if (PDT == nullptr && !F->hasFnAttribute(Attribute::OptimizeNone)) {
632  DeletePDT = std::make_unique<PostDominatorTree>(*F);
633  PDT = DeletePDT.get();
634  }
635 
636  SetTagFunc =
637  Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
638 
639  Instruction *Base = insertBaseTaggedPointer(Allocas, DT);
640 
641  for (auto &I : Allocas) {
642  const AllocaInfo &Info = I.second;
643  AllocaInst *AI = Info.AI;
644  if (Info.Tag < 0)
645  continue;
646 
647  // Replace alloca with tagp(alloca).
648  IRBuilder<> IRB(Info.AI->getNextNode());
650  F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
651  Instruction *TagPCall =
652  IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
653  ConstantInt::get(IRB.getInt64Ty(), Info.Tag)});
654  if (Info.AI->hasName())
655  TagPCall->setName(Info.AI->getName() + ".tag");
656  Info.AI->replaceAllUsesWith(TagPCall);
657  TagPCall->setOperand(0, Info.AI);
658 
659  if (UnrecognizedLifetimes.empty() && Info.LifetimeStart.size() == 1 &&
660  Info.LifetimeEnd.size() == 1) {
661  IntrinsicInst *Start = Info.LifetimeStart[0];
662  IntrinsicInst *End = Info.LifetimeEnd[0];
663  uint64_t Size =
664  cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
666  tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);
667  // We need to ensure that if we tag some object, we certainly untag it
668  // before the function exits.
669  if (PDT != nullptr && postDominates(PDT, End, Start)) {
670  untagAlloca(AI, End, Size);
671  } else {
672  SmallVector<Instruction *, 8> ReachableRetVec;
673  unsigned NumCoveredExits = 0;
674  for (auto &RI : RetVec) {
675  if (!isPotentiallyReachable(Start, RI, nullptr, DT))
676  continue;
677  ReachableRetVec.push_back(RI);
678  if (DT != nullptr && DT->dominates(End, RI))
679  ++NumCoveredExits;
680  }
681  // If there's a mix of covered and non-covered exits, just put the untag
682  // on exits, so we avoid the redundancy of untagging twice.
683  if (NumCoveredExits == ReachableRetVec.size()) {
684  untagAlloca(AI, End, Size);
685  } else {
686  for (auto &RI : ReachableRetVec)
687  untagAlloca(AI, RI, Size);
688  // We may have inserted untag outside of the lifetime interval.
689  // Remove the lifetime end call for this alloca.
690  End->eraseFromParent();
691  }
692  }
693  } else {
694  uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
695  Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
696  tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
697  for (auto &RI : RetVec) {
698  untagAlloca(AI, RI, Size);
699  }
700  // We may have inserted tag/untag outside of any lifetime interval.
701  // Remove all lifetime intrinsics for this alloca.
702  for (auto &II : Info.LifetimeStart)
703  II->eraseFromParent();
704  for (auto &II : Info.LifetimeEnd)
705  II->eraseFromParent();
706  }
707 
708  // Fixup debug intrinsics to point to the new alloca.
709  for (auto DVI : Info.DbgVariableIntrinsics)
710  DVI->replaceVariableLocationOp(Info.OldAI, Info.AI);
711  }
712 
713  // If we have instrumented at least one alloca, all unrecognized lifetime
714  // instrinsics have to go.
715  for (auto &I : UnrecognizedLifetimes)
716  I->eraseFromParent();
717 
718  return true;
719 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:148
StackSafetyAnalysis.h
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:272
MachineInstr.h
llvm
Definition: AllocatorList.h:23
llvm::DominatorTreeBase::findNearestCommonDominator
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
Definition: GenericDomTree.h:468
llvm::initializeAArch64StackTaggingPass
void initializeAArch64StackTaggingPass(PassRegistry &)
ClUseStackSafety
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::desc("Use Stack Safety analysis results"))
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
AArch64.h
llvm::IRBuilderBase::getInt64Ty
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:523
Optional.h
llvm::StructType::get
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:366
llvm::StackSafetyGlobalInfo::isSafe
bool isSafe(const AllocaInst &AI) const
Definition: StackSafetyAnalysis.cpp:882
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1329
Metadata.h
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:90
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:229
DEBUG_TYPE
#define DEBUG_TYPE
Definition: AArch64StackTagging.cpp:63
flatten
loop flatten
Definition: LoopFlatten.cpp:718
llvm::Function
Definition: Function.h:61
llvm::lower_bound
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1592
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
Pass.h
llvm::BitCastInst
This class represents a no-op cast from one type to another.
Definition: Instructions.h:5166
GetElementPtrTypeIterator.h
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1167
Statistic.h
llvm::IRBuilder<>
MapVector.h
ValueTracking.h
Local.h
llvm::IRBuilderBase::CreateOr
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1374
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:45
MachineBasicBlock.h
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
ScalarEvolution.h
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:46
DenseMap.h
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:355
llvm::Optional< int64_t >
llvm::MapVector
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:37
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
ClScanLimit
static cl::opt< unsigned > ClScanLimit("stack-tagging-merge-init-scan-limit", cl::init(40), cl::Hidden)
llvm::dwarf::Tag
Tag
Definition: Dwarf.h:104
llvm::Type::getInt8Ty
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:195
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:122
DepthFirstIterator.h
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::RISCVFenceField::R
@ R
Definition: RISCVBaseInfo.h:180
llvm::MemSetBase::getValue
Value * getValue() const
Definition: IntrinsicInst.h:726
MachineRegisterInfo.h
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
AliasAnalysis.h
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:115
llvm::isPointerOffset
Optional< int64_t > isPointerOffset(const Value *Ptr1, const Value *Ptr2, const DataLayout &DL)
If Ptr1 is provably equal to Ptr2 plus a constant offset, return that offset.
Definition: ValueTracking.cpp:7023
Instruction.h
llvm::Triple::isLittleEndian
bool isLittleEndian() const
Tests whether the target triple is little endian.
Definition: Triple.cpp:1574
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:77
llvm::AllocaInst::isStaticAlloca
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Definition: Instructions.cpp:1374
MachineLoopInfo.h
AArch64TargetMachine.h
AArch64InstrInfo.h
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::AAResults
Definition: AliasAnalysis.h:456
PostDominators.h
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:112
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::ARM_PROC::A
@ A
Definition: ARMBaseInfo.h:34
llvm::IRBuilderBase::getIntNTy
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition: IRBuilder.h:531
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
false
Definition: StackSlotColoring.cpp:142
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::isPotentiallyReachable
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:236
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
kTagGranuleSize
static const Align kTagGranuleSize
Definition: AArch64StackTagging.cpp:81
llvm::Instruction
Definition: Instruction.h:45
llvm::IRBuilderBase::getContext
LLVMContext & getContext() const
Definition: IRBuilder.h:180
llvm::Value::setName
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:361
llvm::codeview::EncodedFramePtrReg::BasePtr
@ BasePtr
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:898
DebugLoc.h
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::FixedVectorType::get
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:644
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::AllocaInst::getAllocationSizeInBits
Optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
Definition: Instructions.cpp:53
llvm::MemSetInst
This class wraps the llvm.memset intrinsic.
Definition: IntrinsicInst.h:865
llvm::IRBuilderBase::CreatePointerCast
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2120
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging", false, false) INITIALIZE_PASS_END(AArch64StackTagging
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
LoopInfo.h
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
llvm::cl::ZeroOrMore
@ ZeroOrMore
Definition: CommandLine.h:120
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:202
llvm::VectorType
Base class of all SIMD vector types.
Definition: DerivedTypes.h:391
llvm::cl::opt< bool >
ClMergeInitSizeLimit
static cl::opt< unsigned > ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272), cl::Hidden)
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:303
llvm::isNoModRef
LLVM_NODISCARD bool isNoModRef(const ModRefInfo MRI)
Definition: AliasAnalysis.h:185
llvm::IRBuilderBase::GetInsertPoint
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:179
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:572
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::IRBuilderBase::getInt8PtrTy
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Definition: IRBuilder.h:561
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
TargetPassConfig.h
MachineFunctionPass.h
llvm::TrackingVH
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:331
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::ArrayType::get
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:598
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
addRange
static void addRange(SmallVectorImpl< ConstantInt * > &EndPoints, ConstantInt *Low, ConstantInt *High)
Definition: Metadata.cpp:1005
CFG.h
llvm::MemIntrinsicBase::getDest
Value * getDest() const
This is just like getRawDest, but it strips off any cast instructions (including addrspacecast) that ...
Definition: IntrinsicInst.h:612
IT
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::ZeroOrMore, cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate IT block based on arch"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow deprecated IT based on ARMv8"), clEnumValN(NoRestrictedIT, "arm-no-restrict-it", "Allow IT blocks based on ARMv7")))
None.h
llvm::AnalysisUsage::setPreservesCFG
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:253
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm::PostDominatorTree
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
Definition: PostDominators.h:28
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::ConstantInt::isZero
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:192
llvm::IRBuilderBase::CreateConstGEP1_32
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1835
uint32_t
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
LiveRegUnits.h
llvm::ConstantInt::getZExtValue
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:140
llvm::BasicBlock::front
const Instruction & front() const
Definition: BasicBlock.h:308
runOnFunction
static bool runOnFunction(Function &F, bool PostInlining)
Definition: EntryExitInstrumenter.cpp:69
llvm::MapVector::empty
bool empty() const
Definition: MapVector.h:79
llvm::IRBuilderBase::CreateBitOrPointerCast
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2152
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:346
llvm::MemIntrinsicBase::getLength
Value * getLength() const
Definition: IntrinsicInst.h:603
Casting.h
Function.h
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::StackSafetyGlobalInfoWrapperPass
This pass performs the global (interprocedural) stack safety analysis (legacy pass manager).
Definition: StackSafetyAnalysis.h:142
ScalarEvolutionExpressions.h
Instructions.h
AArch64Subtarget.h
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:137
llvm::MemIntrinsic::isVolatile
bool isVolatile() const
Definition: IntrinsicInst.h:843
SmallVector.h
MachineInstrBuilder.h
llvm::createAArch64StackTaggingPass
FunctionPass * createAArch64StackTaggingPass(bool IsOptNone)
Definition: AArch64StackTagging.cpp:353
llvm::IRBuilderBase::CreateLShr
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1308
Dominators.h
llvm::Module::getTargetTriple
const std::string & getTargetTriple() const
Get the target triple which is a string describing the target host.
Definition: Module.h:257
ClMergeInit
static cl::opt< bool > ClMergeInit("stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::desc("merge stack variable initializers with tagging when possible"))
llvm::IRBuilderBase::CreateShl
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1287
llvm::AAResultsWrapperPass
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Definition: AliasAnalysis.h:1281
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
generate
We currently generate
Definition: README.txt:597
Tagging
AArch64 Stack Tagging
Definition: AArch64StackTagging.cpp:350
llvm::IRBuilderBase::CreateZExtOrTrunc
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition: IRBuilder.h:1992
MachineOperand.h
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:397
llvm::StackSafetyGlobalInfo
Definition: StackSafetyAnalysis.h:58
llvm::IntegerType::get
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:269
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
postDominates
static bool postDominates(const PostDominatorTree *PDT, const IntrinsicInst *A, const IntrinsicInst *B)
Definition: AArch64StackTagging.cpp:524
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:61
llvm::cl::desc
Definition: CommandLine.h:414
raw_ostream.h
MachineFunction.h
llvm::PostDominatorTree::dominates
bool dominates(const Instruction *I1, const Instruction *I2) const
Return true if I1 dominates I2.
Definition: PostDominators.cpp:54
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
TargetRegisterInfo.h
Debug.h
llvm::AAResults::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc)
getModRefInfo (for call sites) - Return information about whether a particular call site modifies or ...
Definition: AliasAnalysis.cpp:219
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2352
llvm::MemoryLocation
Representation for a specific memory location.
Definition: MemoryLocation.h:209
llvm::findAllocaForValue
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
Definition: ValueTracking.cpp:4463
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::SmallVectorImpl::insert
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:772