LLVM 18.0.0git
AArch64StackTagging.cpp
//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

10#include "AArch64.h"
11#include "AArch64InstrInfo.h"
12#include "AArch64Subtarget.h"
14#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/Statistic.h"
18#include "llvm/Analysis/CFG.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Dominators.h"
38#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/Instruction.h"
45#include "llvm/IR/IntrinsicsAArch64.h"
46#include "llvm/IR/Metadata.h"
47#include "llvm/IR/ValueHandle.h"
49#include "llvm/Pass.h"
51#include "llvm/Support/Debug.h"
55#include <cassert>
56#include <iterator>
57#include <memory>
58#include <utility>
59
using namespace llvm;

#define DEBUG_TYPE "aarch64-stack-tagging"

static cl::opt<bool> ClMergeInit(
    "stack-tagging-merge-init", cl::Hidden, cl::init(true),
    cl::desc("merge stack variable initializers with tagging when possible"));

static cl::opt<bool>
    ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
                     cl::init(true),
                     cl::desc("Use Stack Safety analysis results"));

static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
                                     cl::init(40), cl::Hidden);

static cl::opt<unsigned>
    ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272),
                         cl::Hidden);

static cl::opt<size_t> ClMaxLifetimes(
    "stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
    cl::ReallyHidden,
    cl::desc("How many lifetime ends to handle for a single alloca."),
    cl::Optional);

static const Align kTagGranuleSize = Align(16);

namespace {

class InitializerBuilder {
  uint64_t Size;
  const DataLayout *DL;
  Value *BasePtr;
  Function *SetTagFn;
  Function *SetTagZeroFn;
  Function *StgpFn;

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End;
    Instruction *Inst;
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    auto I =
        llvm::lower_bound(Ranges, Start, [](const Range &LHS, uint64_t RHS) {
          return LHS.End <= RHS;
        });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap - bail.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }

  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do for memset(0).
    if (V->isZero())
      return;
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits;
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits;
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

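  // Worked example for applyMemSet above: folding memset(p + 3, 0xAB, 3)
  // (Start = 3, End = 6) touches only the 8-byte word at offset 0:
  //   Cst      = 0x0101010101010101
  //   LowBits  = (3 - 0) * 8 = 24  ->  Cst = 0x0101010101000000
  //   HighBits = (8 - 6) * 8 = 16  ->  Cst = 0x0000010101000000
  //   C        = Cst * 0xAB        =   0x0000ABABAB000000
  // so only bytes 3..5 of Out[0] carry 0xAB (little-endian byte order).
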
  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }

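  // For instance, sliceValue(IRB, V, 8) on an i128 store shifts right by 64
  // bits and truncates, yielding the high 8 bytes, while sliceValue(IRB, V, 0)
  // simply truncates to the low 8 bytes. A store at alloca offset 4 is sliced
  // with Offset = -4: the value is zero-extended to i64 and shifted left by
  // 32 bits so it lands in bytes 4..7 of the word recorded at Out[0].
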
  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }

  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Walk the 8-byte initializer list 16 bytes at a time; if either of the
    // two 8-byte halves is non-zero and non-undef, emit STGP. Otherwise, emit
    // zeroes up to the next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zero.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

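  // For example, a 32-byte allocation whose initializers only populate Out[0]
  // and Out[8] gets one emitPair for [0, 16) followed by emitZeroes(16, 16)
  // for the remaining tail; with no recorded initializers at all, generate()
  // falls back to a single emitUndef(0, Size), i.e. one settag of the whole
  // allocation.
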
  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << "    " << *A << "\n    " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }

  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        auto *NewTy = FixedVectorType::get(
            IntegerType::get(Ctx, EltSize),
            cast<FixedVectorType>(VecTy)->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};

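// Roughly, for a 32-byte tagged slot the builder above lowers the combined
// initializer to MTE intrinsics along these lines (illustrative IR sketch;
// %tagged stands for the already-tagged base pointer):
//
//   call void @llvm.aarch64.stgp(ptr %tagged, i64 %init0, i64 %init8)
//   %p16 = getelementptr i8, ptr %tagged, i32 16
//   call void @llvm.aarch64.settag.zero(ptr %p16, i64 16)
//
// and a fully uninitialized region collapses to a single
//   call void @llvm.aarch64.settag(ptr %tagged, i64 32)
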
class AArch64StackTagging : public FunctionPass {
  const bool MergeInit;
  const bool UseStackSafety;

public:
  static char ID; // Pass ID, replacement for typeid

  AArch64StackTagging(bool IsOptNone = false)
      : FunctionPass(ID),
        MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
        UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
                                                            : !IsOptNone) {
    initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
  }

  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
                 uint64_t Size);
  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);

  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
                                   uint64_t Size, InitializerBuilder &IB);

  Instruction *insertBaseTaggedPointer(
      const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
      const DominatorTree *DT);
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }

private:
  Function *F = nullptr;
  Function *SetTagFunc = nullptr;
  const DataLayout *DL = nullptr;
  AAResults *AA = nullptr;
  const StackSafetyGlobalInfo *SSI = nullptr;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    if (UseStackSafety)
      AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
    if (MergeInit)
      AU.addRequired<AAResultsWrapperPass>();
  }
};

} // end anonymous namespace

char AArch64StackTagging::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                    false, false)

FunctionPass *llvm::createAArch64StackTaggingPass(bool IsOptNone) {
  return new AArch64StackTagging(IsOptNone);
}

Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
                                                      Value *StartPtr,
                                                      uint64_t Size,
                                                      InitializerBuilder &IB) {
  MemoryLocation AllocaLoc{StartPtr, Size};
  Instruction *LastInst = StartInst;
  BasicBlock::iterator BI(StartInst);

  unsigned Count = 0;
  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
    if (!isa<DbgInfoIntrinsic>(*BI))
      ++Count;

    if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
      continue;

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      if (!NextStore->isSimple())
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      std::optional<int64_t> Offset =
          NextStore->getPointerOperand()->getPointerOffsetFrom(StartPtr, *DL);
      if (!Offset)
        break;

      if (!IB.addStore(*Offset, NextStore, DL))
        break;
      LastInst = NextStore;
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
        break;

      if (!isa<ConstantInt>(MSI->getValue()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      std::optional<int64_t> Offset =
          MSI->getDest()->getPointerOffsetFrom(StartPtr, *DL);
      if (!Offset)
        break;

      if (!IB.addMemSet(*Offset, MSI))
        break;
      LastInst = MSI;
    }
  }
  return LastInst;
}

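// As an illustrative sketch, for IR along the lines of
//   %buf = alloca [16 x i8], align 16
//   store i64 1, ptr %buf
//   %buf.8 = getelementptr i8, ptr %buf, i64 8
//   store i64 2, ptr %buf.8
// the scan above records both stores in the InitializerBuilder (their offsets
// are provably constant relative to %buf) and returns the last merged write,
// where tagAlloca then emits the combined stgp/settag sequence and the
// original stores are erased.
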
void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                    Value *Ptr, uint64_t Size) {
  auto SetTagZeroFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
  auto StgpFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);

  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
  bool LittleEndian =
      Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
  // Current implementation of initializer merging assumes little endianness.
  if (MergeInit && !F->hasOptNone() && LittleEndian &&
      Size < ClMergeInitSizeLimit) {
    LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
                      << ", size = " << Size << "\n");
    InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
  }

  IRBuilder<> IRB(InsertBefore);
  IB.generate(IRB);
}

void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                      uint64_t Size) {
  IRBuilder<> IRB(InsertBefore);
  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getPtrTy()),
                              ConstantInt::get(IRB.getInt64Ty(), Size)});
}

Instruction *AArch64StackTagging::insertBaseTaggedPointer(
    const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
    const DominatorTree *DT) {
  BasicBlock *PrologueBB = nullptr;
  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
  for (auto &I : AllocasToInstrument) {
    const memtag::AllocaInfo &Info = I.second;
    AllocaInst *AI = Info.AI;
    if (!PrologueBB) {
      PrologueBB = AI->getParent();
      continue;
    }
    PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
  }
  assert(PrologueBB);

  IRBuilder<> IRB(&PrologueBB->front());
  Function *IRG_SP =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
  Instruction *Base =
      IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
  Base->setName("basetag");
  return Base;
}

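// The helper above materializes a random base tag for the frame, roughly:
//   %basetag = call ptr @llvm.aarch64.irg.sp(i64 0)
// Every instrumented alloca then derives its own tagged pointer from
// %basetag via the llvm.aarch64.tagp intrinsic in runOnFunction below.
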
// FIXME: check for MTE extension
bool AArch64StackTagging::runOnFunction(Function &Fn) {
  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
    return false;

  if (UseStackSafety)
    SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
  F = &Fn;
  DL = &Fn.getParent()->getDataLayout();
  if (MergeInit)
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  memtag::StackInfoBuilder SIB(SSI);
  for (Instruction &I : instructions(F))
    SIB.visit(I);
  memtag::StackInfo &SInfo = SIB.get();

  if (SInfo.AllocasToInstrument.empty())
    return false;

  std::unique_ptr<DominatorTree> DeleteDT;
  DominatorTree *DT = nullptr;
  if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &P->getDomTree();

  if (DT == nullptr) {
    DeleteDT = std::make_unique<DominatorTree>(*F);
    DT = DeleteDT.get();
  }

  std::unique_ptr<PostDominatorTree> DeletePDT;
  PostDominatorTree *PDT = nullptr;
  if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
    PDT = &P->getPostDomTree();

  if (PDT == nullptr) {
    DeletePDT = std::make_unique<PostDominatorTree>(*F);
    PDT = DeletePDT.get();
  }

  std::unique_ptr<LoopInfo> DeleteLI;
  LoopInfo *LI = nullptr;
  if (auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>()) {
    LI = &LIWP->getLoopInfo();
  } else {
    DeleteLI = std::make_unique<LoopInfo>(*DT);
    LI = DeleteLI.get();
  }

  SetTagFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);

  Instruction *Base = insertBaseTaggedPointer(SInfo.AllocasToInstrument, DT);

  int NextTag = 0;
  for (auto &I : SInfo.AllocasToInstrument) {
    memtag::AllocaInfo &Info = I.second;
    assert(Info.AI && SIB.isInterestingAlloca(*Info.AI));
    TrackingVH<Instruction> OldAI = Info.AI;
    memtag::alignAndPadAlloca(Info, kTagGranuleSize);
    AllocaInst *AI = Info.AI;
    int Tag = NextTag;
    NextTag = (NextTag + 1) % 16;
    // Replace alloca with tagp(alloca).
    IRBuilder<> IRB(Info.AI->getNextNode());
    Function *TagP = Intrinsic::getDeclaration(
        F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
    Instruction *TagPCall =
        IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
                              ConstantInt::get(IRB.getInt64Ty(), Tag)});
    if (Info.AI->hasName())
      TagPCall->setName(Info.AI->getName() + ".tag");
    Info.AI->replaceAllUsesWith(TagPCall);
    TagPCall->setOperand(0, Info.AI);

    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis and can leave memory tagged after function
    // return. Work around this by always untagging at every return statement
    // if return_twice functions are called.
    bool StandardLifetime =
        SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT, LI,
                                   ClMaxLifetimes) &&
        !SInfo.CallsReturnTwice;
    if (StandardLifetime) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      uint64_t Size =
          cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
      Size = alignTo(Size, kTagGranuleSize);
      tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);

      auto TagEnd = [&](Instruction *Node) { untagAlloca(AI, Node, Size); };
      if (!DT || !PDT ||
          !memtag::forAllReachableExits(*DT, *PDT, *LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      uint64_t Size = *Info.AI->getAllocationSize(*DL);
      Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getPtrTy());
      tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
      for (auto *RI : SInfo.RetVec) {
        untagAlloca(AI, RI, Size);
      }
      // We may have inserted tag/untag outside of any lifetime interval.
      // Remove all lifetime intrinsics for this alloca.
      for (auto *II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto *II : Info.LifetimeEnd)
        II->eraseFromParent();
    }

    // Fixup debug intrinsics to point to the new alloca.
    for (auto *DVI : Info.DbgVariableIntrinsics)
      DVI->replaceVariableLocationOp(OldAI, Info.AI);
  }

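  // After the loop above, each instrumented alloca has been rewritten roughly
  // as (illustrative sketch; N is the per-alloca tag offset 0..15):
  //   %x = alloca ..., align 16
  //   %x.tag = call ptr @llvm.aarch64.tagp.p0(ptr %x, ptr %basetag, i64 N)
  //   call void @llvm.aarch64.settag(ptr %x.tag, i64 <granule-aligned size>)
  //   ...
  //   call void @llvm.aarch64.settag(ptr %x, i64 <granule-aligned size>)
  // with all former uses of %x routed through %x.tag and the final settag
  // untagging the memory before every reachable exit.
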
  // If we have instrumented at least one alloca, all unrecognized lifetime
  // intrinsics have to go.
  for (auto *I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();

  return true;
}