LLVM 17.0.0git
SafeStack.cpp
1//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10// and the unsafe stack (explicitly allocated and managed through the runtime
11// support library).
12//
13// http://clang.llvm.org/docs/SafeStack.html
14//
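// As an illustrative example (not from the original source), in a function
// such as
//
//   void f() { char buf[64]; scanf("%s", buf); }
//
// the address of 'buf' escapes into an external call, so the pass would move
// 'buf' to the unsafe stack, while provably safe locals and register spills
// stay on the ordinary stack managed by the backend.
//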
15//===----------------------------------------------------------------------===//
16
17#include "SafeStackLayout.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/Statistic.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DIBuilder.h"
40#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/IRBuilder.h"
46#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Intrinsics.h"
50#include "llvm/IR/MDBuilder.h"
51#include "llvm/IR/Metadata.h"
52#include "llvm/IR/Module.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Use.h"
55#include "llvm/IR/Value.h"
57#include "llvm/Pass.h"
59#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <optional>
71#include <string>
72#include <utility>
73
74using namespace llvm;
75using namespace llvm::safestack;
76
77#define DEBUG_TYPE "safe-stack"
78
79namespace llvm {
80
81STATISTIC(NumFunctions, "Total number of functions");
82STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
83STATISTIC(NumUnsafeStackRestorePointsFunctions,
84 "Number of functions that use setjmp or exceptions");
85
86STATISTIC(NumAllocas, "Total number of allocas");
87STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
88STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
89STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
90STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
91
92} // namespace llvm
93
94/// Use __safestack_pointer_address even if the platform has a faster way of
95/// accessing the safe stack pointer.
96static cl::opt<bool>
97 SafeStackUsePointerAddress("safestack-use-pointer-address",
98 cl::init(false), cl::Hidden);
99
100static cl::opt<bool> ClColoring("safe-stack-coloring",
101 cl::desc("enable safe stack coloring"),
102 cl::Hidden, cl::init(true));
103
104namespace {
105
106/// The SafeStack pass splits the stack of each function into the safe
107/// stack, which is only accessed through memory safe dereferences (as
108/// determined statically), and the unsafe stack, which contains all
109/// local variables that are accessed in ways that we can't prove to
110/// be safe.
111class SafeStack {
112 Function &F;
113 const TargetLoweringBase &TL;
114 const DataLayout &DL;
115 DomTreeUpdater *DTU;
116 ScalarEvolution &SE;
117
118 Type *StackPtrTy;
119 Type *IntPtrTy;
120 Type *Int32Ty;
121 Type *Int8Ty;
122
123 Value *UnsafeStackPtr = nullptr;
124
125 /// Unsafe stack alignment. Each stack frame must ensure that the stack is
126 /// aligned to this value. We need to re-align the unsafe stack if the
127 /// alignment of any object on the stack exceeds this value.
128 ///
129 /// 16 seems like a reasonable upper bound on the alignment of objects that we
130 /// might expect to appear on the stack on most common targets.
131 static constexpr Align StackAlignment = Align::Constant<16>();
132
133 /// Return the value of the stack canary.
134 Value *getStackGuard(IRBuilder<> &IRB, Function &F);
135
136 /// Load stack guard from the frame and check if it has changed.
137 void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
138 AllocaInst *StackGuardSlot, Value *StackGuard);
139
140 /// Find all static allocas, dynamic allocas, return instructions and
141 /// stack restore points (exception unwind blocks and setjmp calls) in the
142 /// given function and append them to the respective vectors.
143 void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
144 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
145 SmallVectorImpl<Argument *> &ByValArguments,
146 SmallVectorImpl<Instruction *> &Returns,
147 SmallVectorImpl<Instruction *> &StackRestorePoints);
148
149 /// Calculate the allocation size of a given alloca. Returns 0 if the
150 /// size can not be statically determined.
151 uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
152
153 /// Allocate space for all static allocas in \p StaticAllocas,
154 /// replace allocas with pointers into the unsafe stack.
155 ///
156 /// \returns A pointer to the top of the unsafe stack after all unsafe static
157 /// allocas are allocated.
158 Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
159 ArrayRef<AllocaInst *> StaticAllocas,
160 ArrayRef<Argument *> ByValArguments,
161 Instruction *BasePointer,
162 AllocaInst *StackGuardSlot);
163
164 /// Generate code to restore the stack after all stack restore points
165 /// in \p StackRestorePoints.
166 ///
167 /// \returns A local variable in which to maintain the dynamic top of the
168 /// unsafe stack if needed.
169 AllocaInst *
170 createStackRestorePoints(IRBuilder<> &IRB, Function &F,
171 ArrayRef<Instruction *> StackRestorePoints,
172 Value *StaticTop, bool NeedDynamicTop);
173
174 /// Replace all allocas in \p DynamicAllocas with code to allocate
175 /// space dynamically on the unsafe stack and store the dynamic unsafe stack
176 /// top to \p DynamicTop if non-null.
177 void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
178 AllocaInst *DynamicTop,
179 ArrayRef<AllocaInst *> DynamicAllocas);
180
181 bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);
182
183 bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
184 const Value *AllocaPtr, uint64_t AllocaSize);
185 bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
186 uint64_t AllocaSize);
187
188 bool ShouldInlinePointerAddress(CallInst &CI);
189 void TryInlinePointerAddress();
190
191public:
192 SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL,
193 DomTreeUpdater *DTU, ScalarEvolution &SE)
194 : F(F), TL(TL), DL(DL), DTU(DTU), SE(SE),
195 StackPtrTy(Type::getInt8PtrTy(F.getContext())),
196 IntPtrTy(DL.getIntPtrType(F.getContext())),
197 Int32Ty(Type::getInt32Ty(F.getContext())),
198 Int8Ty(Type::getInt8Ty(F.getContext())) {}
199
200 // Run the transformation on the associated function.
201 // Returns whether the function was changed.
202 bool run();
203};
204
205constexpr Align SafeStack::StackAlignment;
206
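// Illustrative values (not from the original source): "alloca [4 x i32]"
// yields 16 below, while "alloca i32, i32 %n" with a non-constant %n yields 0,
// i.e. the size is not statically known.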
207uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
208 uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
209 if (AI->isArrayAllocation()) {
210 auto C = dyn_cast<ConstantInt>(AI->getArraySize());
211 if (!C)
212 return 0;
213 Size *= C->getZExtValue();
214 }
215 return Size;
216}
217
218bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
219 const Value *AllocaPtr, uint64_t AllocaSize) {
220 const SCEV *AddrExpr = SE.getSCEV(Addr);
221 const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AddrExpr));
222 if (!Base || Base->getValue() != AllocaPtr) {
223 LLVM_DEBUG(
224 dbgs() << "[SafeStack] "
225 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
226 << *AllocaPtr << "\n"
227 << "SCEV " << *AddrExpr << " not directly based on alloca\n");
228 return false;
229 }
230
231 const SCEV *Expr = SE.removePointerBase(AddrExpr);
232 uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
233 ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
234 ConstantRange SizeRange =
235 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
236 ConstantRange AccessRange = AccessStartRange.add(SizeRange);
237 ConstantRange AllocaRange =
238 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
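 // Illustrative numbers (not from the original source): with AllocaSize == 16
 // and AccessSize == 4, an AccessStartRange of [0,8) gives an AccessRange of
 // [0,11), which AllocaRange == [0,16) contains, so the access is safe; a
 // start range of [0,16) would give [0,19) and be rejected below.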
239 bool Safe = AllocaRange.contains(AccessRange);
240
241 LLVM_DEBUG(
242 dbgs() << "[SafeStack] "
243 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
244 << *AllocaPtr << "\n"
245 << " Access " << *Addr << "\n"
246 << " SCEV " << *Expr
247 << " U: " << SE.getUnsignedRange(Expr)
248 << ", S: " << SE.getSignedRange(Expr) << "\n"
249 << " Range " << AccessRange << "\n"
250 << " AllocaRange " << AllocaRange << "\n"
251 << " " << (Safe ? "safe" : "unsafe") << "\n");
252
253 return Safe;
254}
255
256bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
257 const Value *AllocaPtr,
258 uint64_t AllocaSize) {
259 if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
260 if (MTI->getRawSource() != U && MTI->getRawDest() != U)
261 return true;
262 } else {
263 if (MI->getRawDest() != U)
264 return true;
265 }
266
267 const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
268 // Non-constant size => unsafe. FIXME: try SCEV getRange.
269 if (!Len) return false;
270 return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
271}
272
273/// Check whether a given allocation must be put on the safe
274/// stack or not. The function analyzes all uses of AI and checks whether it is
275/// only accessed in a memory safe way (as decided statically).
276bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
277 // Go through all uses of this alloca and check whether all accesses to the
278 // allocated object are statically known to be memory safe and, hence, the
279 // object can be placed on the safe stack.
280 SmallPtrSet<const Value *, 16> Visited;
281 SmallVector<const Value *, 8> WorkList;
282 WorkList.push_back(AllocaPtr);
283
284 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
285 while (!WorkList.empty()) {
286 const Value *V = WorkList.pop_back_val();
287 for (const Use &UI : V->uses()) {
288 auto I = cast<const Instruction>(UI.getUser());
289 assert(V == UI.get());
290
291 switch (I->getOpcode()) {
292 case Instruction::Load:
293 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
294 AllocaSize))
295 return false;
296 break;
297
298 case Instruction::VAArg:
299 // "va-arg" from a pointer is safe.
300 break;
301 case Instruction::Store:
302 if (V == I->getOperand(0)) {
303 // Stored the pointer - conservatively assume it may be unsafe.
304 LLVM_DEBUG(dbgs()
305 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
306 << "\n store of address: " << *I << "\n");
307 return false;
308 }
309
310 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
311 AllocaPtr, AllocaSize))
312 return false;
313 break;
314
315 case Instruction::Ret:
316 // Information leak.
317 return false;
318
319 case Instruction::Call:
320 case Instruction::Invoke: {
321 const CallBase &CS = *cast<CallBase>(I);
322
323 if (I->isLifetimeStartOrEnd())
324 continue;
325
326 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
327 if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
328 LLVM_DEBUG(dbgs()
329 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
330 << "\n unsafe memintrinsic: " << *I << "\n");
331 return false;
332 }
333 continue;
334 }
335
336 // LLVM 'nocapture' attribute is only set for arguments whose address
337 // is not stored, passed around, or used in any other non-trivial way.
338 // We assume that passing a pointer to an object as a 'nocapture
339 // readnone' argument is safe.
340 // FIXME: a more precise solution would require an interprocedural
341 // analysis here, which would look at all uses of an argument inside
342 // the function being called.
343 auto B = CS.arg_begin(), E = CS.arg_end();
344 for (const auto *A = B; A != E; ++A)
345 if (A->get() == V)
346 if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
347 CS.doesNotAccessMemory()))) {
348 LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
349 << "\n unsafe call: " << *I << "\n");
350 return false;
351 }
352 continue;
353 }
354
355 default:
356 if (Visited.insert(I).second)
357 WorkList.push_back(cast<const Instruction>(I));
358 }
359 }
360 }
361
362 // All uses of the alloca are safe, we can place it on the safe stack.
363 return true;
364}
365
366Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
367 Value *StackGuardVar = TL.getIRStackGuard(IRB);
368 Module *M = F.getParent();
369
370 if (!StackGuardVar) {
371 TL.insertSSPDeclarations(*M);
372 return IRB.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
373 }
374
375 return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
376}
377
378void SafeStack::findInsts(Function &F,
379 SmallVectorImpl<AllocaInst *> &StaticAllocas,
380 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
381 SmallVectorImpl<Argument *> &ByValArguments,
382 SmallVectorImpl<Instruction *> &Returns,
383 SmallVectorImpl<Instruction *> &StackRestorePoints) {
384 for (Instruction &I : instructions(&F)) {
385 if (auto AI = dyn_cast<AllocaInst>(&I)) {
386 ++NumAllocas;
387
388 uint64_t Size = getStaticAllocaAllocationSize(AI);
389 if (IsSafeStackAlloca(AI, Size))
390 continue;
391
392 if (AI->isStaticAlloca()) {
393 ++NumUnsafeStaticAllocas;
394 StaticAllocas.push_back(AI);
395 } else {
396 ++NumUnsafeDynamicAllocas;
397 DynamicAllocas.push_back(AI);
398 }
399 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
400 if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
401 Returns.push_back(CI);
402 else
403 Returns.push_back(RI);
404 } else if (auto CI = dyn_cast<CallInst>(&I)) {
405 // setjmps require stack restore.
406 if (CI->getCalledFunction() && CI->canReturnTwice())
407 StackRestorePoints.push_back(CI);
408 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
409 // Exception landing pads require stack restore.
410 StackRestorePoints.push_back(LP);
411 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
412 if (II->getIntrinsicID() == Intrinsic::gcroot)
414 "gcroot intrinsic not compatible with safestack attribute");
415 }
416 }
417 for (Argument &Arg : F.args()) {
418 if (!Arg.hasByValAttr())
419 continue;
420 uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
421 if (IsSafeStackAlloca(&Arg, Size))
422 continue;
423
424 ++NumUnsafeByValArguments;
425 ByValArguments.push_back(&Arg);
426 }
427}
428
429AllocaInst *
430SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
431 ArrayRef<Instruction *> StackRestorePoints,
432 Value *StaticTop, bool NeedDynamicTop) {
433 assert(StaticTop && "The stack top isn't set.");
434
435 if (StackRestorePoints.empty())
436 return nullptr;
437
438 // We need the current value of the shadow stack pointer to restore
439 // after longjmp or exception catching.
440
441 // FIXME: On some platforms this could be handled by the longjmp/exception
442 // runtime itself.
443
444 AllocaInst *DynamicTop = nullptr;
445 if (NeedDynamicTop) {
446 // If we also have dynamic allocas, the stack pointer value changes
447 // throughout the function. For now we store it in an alloca.
448 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
449 "unsafe_stack_dynamic_ptr");
450 IRB.CreateStore(StaticTop, DynamicTop);
451 }
452
453 // Restore current stack pointer after longjmp/exception catch.
454 for (Instruction *I : StackRestorePoints) {
455 ++NumUnsafeStackRestorePoints;
456
457 IRB.SetInsertPoint(I->getNextNode());
458 Value *CurrentTop =
459 DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
460 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
461 }
462
463 return DynamicTop;
464}
465
466void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
467 AllocaInst *StackGuardSlot, Value *StackGuard) {
468 Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
469 Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
470
471 auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
472 auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false);
473 MDNode *Weights = MDBuilder(F.getContext())
474 .createBranchWeights(SuccessProb.getNumerator(),
475 FailureProb.getNumerator());
476 Instruction *CheckTerm =
477 SplitBlockAndInsertIfThen(Cmp, &RI, /* Unreachable */ true, Weights, DTU);
478 IRBuilder<> IRBFail(CheckTerm);
479 // FIXME: respect -fsanitize-trap / -ftrap-function here?
480 FunctionCallee StackChkFail =
481 F.getParent()->getOrInsertFunction("__stack_chk_fail", IRB.getVoidTy());
482 IRBFail.CreateCall(StackChkFail, {});
483}
484
485/// We explicitly compute and set the unsafe stack layout for all unsafe
486/// static alloca instructions. We save the unsafe "base pointer" in the
487/// prologue into a local variable and restore it in the epilogue.
488Value *SafeStack::moveStaticAllocasToUnsafeStack(
489 IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
490 ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
491 AllocaInst *StackGuardSlot) {
492 if (StaticAllocas.empty() && ByValArguments.empty())
493 return BasePointer;
494
495 DIBuilder DIB(*F.getParent());
496
497 StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
498 static const StackLifetime::LiveRange NoColoringRange(1, true);
499 if (ClColoring)
500 SSC.run();
501
502 for (const auto *I : SSC.getMarkers()) {
503 auto *Op = dyn_cast<Instruction>(I->getOperand(1));
504 const_cast<IntrinsicInst *>(I)->eraseFromParent();
505 // Remove the operand bitcast, too, if it has no more uses left.
506 if (Op && Op->use_empty())
507 Op->eraseFromParent();
508 }
509
510 // Unsafe stack always grows down.
511 StackLayout SSL(StackAlignment);
512 if (StackGuardSlot) {
513 Type *Ty = StackGuardSlot->getAllocatedType();
514 Align Align = std::max(DL.getPrefTypeAlign(Ty), StackGuardSlot->getAlign());
515 SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
516 Align, SSC.getFullLiveRange());
517 }
518
519 for (Argument *Arg : ByValArguments) {
520 Type *Ty = Arg->getParamByValType();
521 uint64_t Size = DL.getTypeStoreSize(Ty);
522 if (Size == 0)
523 Size = 1; // Don't create zero-sized stack objects.
524
525 // Ensure the object is properly aligned.
526 Align Align = DL.getPrefTypeAlign(Ty);
527 if (auto A = Arg->getParamAlign())
528 Align = std::max(Align, *A);
529 SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
530 }
531
532 for (AllocaInst *AI : StaticAllocas) {
533 Type *Ty = AI->getAllocatedType();
534 uint64_t Size = getStaticAllocaAllocationSize(AI);
535 if (Size == 0)
536 Size = 1; // Don't create zero-sized stack objects.
537
538 // Ensure the object is properly aligned.
539 Align Align = std::max(DL.getPrefTypeAlign(Ty), AI->getAlign());
540
541 SSL.addObject(AI, Size, Align,
542 ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
543 }
544
545 SSL.computeLayout();
546 Align FrameAlignment = SSL.getFrameAlignment();
547
548 // FIXME: tell SSL that we start at a less-than-MaxAlignment aligned location
549 // (AlignmentSkew).
550 if (FrameAlignment > StackAlignment) {
551 // Re-align the base pointer according to the max requested alignment.
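 // (Illustrative, not from the original source: with FrameAlignment == 32,
 // a base pointer of 0x1008 is masked with ~31 down to 0x1000; rounding
 // down is correct because the unsafe stack grows down.)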
552 IRB.SetInsertPoint(BasePointer->getNextNode());
553 BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
554 IRB.CreateAnd(
555 IRB.CreatePtrToInt(BasePointer, IntPtrTy),
556 ConstantInt::get(IntPtrTy, ~(FrameAlignment.value() - 1))),
557 StackPtrTy));
558 }
559
560 IRB.SetInsertPoint(BasePointer->getNextNode());
561
562 if (StackGuardSlot) {
563 unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
564 Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
565 ConstantInt::get(Int32Ty, -Offset));
566 Value *NewAI =
567 IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");
568
569 // Replace the alloca with the new location.
570 StackGuardSlot->replaceAllUsesWith(NewAI);
571 StackGuardSlot->eraseFromParent();
572 }
573
574 for (Argument *Arg : ByValArguments) {
575 unsigned Offset = SSL.getObjectOffset(Arg);
576 MaybeAlign Align(SSL.getObjectAlignment(Arg));
577 Type *Ty = Arg->getParamByValType();
578
579 uint64_t Size = DL.getTypeStoreSize(Ty);
580 if (Size == 0)
581 Size = 1; // Don't create zero-sized stack objects.
582
583 Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
584 ConstantInt::get(Int32Ty, -Offset));
585 Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
586 Arg->getName() + ".unsafe-byval");
587
588 // Replace the alloca with the new location.
589 replaceDbgDeclare(Arg, BasePointer, DIB, DIExpression::ApplyOffset,
590 -Offset);
591 Arg->replaceAllUsesWith(NewArg);
592 IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
593 IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
594 }
595
596 // Allocate space for every unsafe static AllocaInst on the unsafe stack.
597 for (AllocaInst *AI : StaticAllocas) {
598 IRB.SetInsertPoint(AI);
599 unsigned Offset = SSL.getObjectOffset(AI);
600
601 replaceDbgDeclare(AI, BasePointer, DIB, DIExpression::ApplyOffset, -Offset);
602 replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
603
604 // Replace uses of the alloca with the new location.
605 // Insert address calculation close to each use to work around PR27844.
606 std::string Name = std::string(AI->getName()) + ".unsafe";
607 while (!AI->use_empty()) {
608 Use &U = *AI->use_begin();
609 Instruction *User = cast<Instruction>(U.getUser());
610
611 Instruction *InsertBefore;
612 if (auto *PHI = dyn_cast<PHINode>(User))
613 InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
614 else
615 InsertBefore = User;
616
617 IRBuilder<> IRBUser(InsertBefore);
618 Value *Off = IRBUser.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
619 ConstantInt::get(Int32Ty, -Offset));
620 Value *Replacement = IRBUser.CreateBitCast(Off, AI->getType(), Name);
621
622 if (auto *PHI = dyn_cast<PHINode>(User))
623 // PHI nodes may have multiple incoming edges from the same BB (why??),
624 // all must be updated at once with the same incoming value.
625 PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
626 else
627 U.set(Replacement);
628 }
629
630 AI->eraseFromParent();
631 }
632
633 // Re-align BasePointer so that our callees would see it aligned as
634 // expected.
635 // FIXME: no need to update BasePointer in leaf functions.
636 unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
637
638 MDBuilder MDB(F.getContext());
639 SmallVector<Metadata *, 2> Data;
640 Data.push_back(MDB.createString("unsafe-stack-size"));
641 Data.push_back(MDB.createConstant(ConstantInt::get(Int32Ty, FrameSize)));
642 MDNode *MD = MDTuple::get(F.getContext(), Data);
643 F.setMetadata(LLVMContext::MD_annotation, MD);
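 // The attached annotation is roughly of the form
 //   !{!"unsafe-stack-size", i32 <FrameSize>}
 // (shape shown for illustration; consult the emitted IR for the exact form).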
644
645 // Update shadow stack pointer in the function epilogue.
646 IRB.SetInsertPoint(BasePointer->getNextNode());
647
648 Value *StaticTop =
649 IRB.CreateGEP(Int8Ty, BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
650 "unsafe_stack_static_top");
651 IRB.CreateStore(StaticTop, UnsafeStackPtr);
652 return StaticTop;
653}
654
655void SafeStack::moveDynamicAllocasToUnsafeStack(
656 Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
657 ArrayRef<AllocaInst *> DynamicAllocas) {
658 DIBuilder DIB(*F.getParent());
659
660 for (AllocaInst *AI : DynamicAllocas) {
661 IRBuilder<> IRB(AI);
662
663 // Compute the new SP value (after AI).
664 Value *ArraySize = AI->getArraySize();
665 if (ArraySize->getType() != IntPtrTy)
666 ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
667
668 Type *Ty = AI->getAllocatedType();
669 uint64_t TySize = DL.getTypeAllocSize(Ty);
670 Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
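 // (Illustrative, not from the original source: for "alloca i64, i64 %n"
 // this computes Size = %n * 8.)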
671
672 Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
673 IntPtrTy);
674 SP = IRB.CreateSub(SP, Size);
675
676 // Align the SP value to satisfy the AllocaInst, type and stack alignments.
677 auto Align = std::max(std::max(DL.getPrefTypeAlign(Ty), AI->getAlign()),
678 StackAlignment);
679
680 Value *NewTop = IRB.CreateIntToPtr(
681 IRB.CreateAnd(SP,
682 ConstantInt::get(IntPtrTy, ~uint64_t(Align.value() - 1))),
683 StackPtrTy);
684
685 // Save the stack pointer.
686 IRB.CreateStore(NewTop, UnsafeStackPtr);
687 if (DynamicTop)
688 IRB.CreateStore(NewTop, DynamicTop);
689
690 Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
691 if (AI->hasName() && isa<Instruction>(NewAI))
692 NewAI->takeName(AI);
693
694 replaceDbgDeclare(AI, NewAI, DIB, DIExpression::ApplyOffset, 0);
695 AI->replaceAllUsesWith(NewAI);
696 AI->eraseFromParent();
697 }
698
699 if (!DynamicAllocas.empty()) {
700 // Now go through the instructions again, replacing stacksave/stackrestore.
701 for (Instruction &I : llvm::make_early_inc_range(instructions(&F))) {
702 auto *II = dyn_cast<IntrinsicInst>(&I);
703 if (!II)
704 continue;
705
706 if (II->getIntrinsicID() == Intrinsic::stacksave) {
707 IRBuilder<> IRB(II);
708 Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
709 LI->takeName(II);
710 II->replaceAllUsesWith(LI);
711 II->eraseFromParent();
712 } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
713 IRBuilder<> IRB(II);
714 Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
715 SI->takeName(II);
716 assert(II->use_empty());
717 II->eraseFromParent();
718 }
719 }
720 }
721}
722
723bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
724 Function *Callee = CI.getCalledFunction();
725 if (CI.hasFnAttr(Attribute::AlwaysInline) &&
726 isInlineViable(*Callee).isSuccess())
727 return true;
728 if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
729 CI.isNoInline())
730 return false;
731 return true;
732}
733
734void SafeStack::TryInlinePointerAddress() {
735 auto *CI = dyn_cast<CallInst>(UnsafeStackPtr);
736 if (!CI)
737 return;
738
739 if(F.hasOptNone())
740 return;
741
742 Function *Callee = CI->getCalledFunction();
743 if (!Callee || Callee->isDeclaration())
744 return;
745
746 if (!ShouldInlinePointerAddress(*CI))
747 return;
748
749 InlineFunctionInfo IFI;
750 InlineFunction(*CI, IFI);
751}
752
753bool SafeStack::run() {
754 assert(F.hasFnAttribute(Attribute::SafeStack) &&
755 "Can't run SafeStack on a function without the attribute");
756 assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");
757
758 ++NumFunctions;
759
760 SmallVector<AllocaInst *, 16> StaticAllocas;
761 SmallVector<AllocaInst *, 4> DynamicAllocas;
762 SmallVector<Argument *, 4> ByValArguments;
763 SmallVector<Instruction *, 4> Returns;
764
765 // Collect all points where stack gets unwound and needs to be restored
766 // This is only necessary because the runtime (setjmp and unwind code) is
767 // not aware of the unsafe stack and won't unwind/restore it properly.
768 // To work around this problem without changing the runtime, we insert
769 // instrumentation to restore the unsafe stack pointer when necessary.
770 SmallVector<Instruction *, 4> StackRestorePoints;
771
772 // Find all static and dynamic alloca instructions that must be moved to the
773 // unsafe stack, all return instructions and stack restore points.
774 findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
775 StackRestorePoints);
776
777 if (StaticAllocas.empty() && DynamicAllocas.empty() &&
778 ByValArguments.empty() && StackRestorePoints.empty())
779 return false; // Nothing to do in this function.
780
781 if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
782 !ByValArguments.empty())
783 ++NumUnsafeStackFunctions; // This function has the unsafe stack.
784
785 if (!StackRestorePoints.empty())
786 ++NumUnsafeStackRestorePointsFunctions;
787
788 IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
789 // Calls must always have a debug location, or else inlining breaks. So
790 // we explicitly set an artificial debug location here.
791 if (DISubprogram *SP = F.getSubprogram())
792 IRB.SetCurrentDebugLocation(
793 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP));
794 if (SafeStackUsePointerAddress) {
795 FunctionCallee Fn = F.getParent()->getOrInsertFunction(
796 "__safestack_pointer_address", StackPtrTy->getPointerTo(0));
797 UnsafeStackPtr = IRB.CreateCall(Fn);
798 } else {
799 UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);
800 }
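 // At this point UnsafeStackPtr is either the result of calling
 // __safestack_pointer_address or the target's preferred location for the
 // unsafe stack pointer (for example a TLS slot); the code below reads and
 // updates the unsafe stack top through it.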
801
802 // Load the current stack pointer (we'll also use it as a base pointer).
803 // FIXME: use a dedicated register for it ?
804 Instruction *BasePointer =
805 IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
806 assert(BasePointer->getType() == StackPtrTy);
807
808 AllocaInst *StackGuardSlot = nullptr;
809 // FIXME: implement weaker forms of stack protector.
810 if (F.hasFnAttribute(Attribute::StackProtect) ||
811 F.hasFnAttribute(Attribute::StackProtectStrong) ||
812 F.hasFnAttribute(Attribute::StackProtectReq)) {
813 Value *StackGuard = getStackGuard(IRB, F);
814 StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
815 IRB.CreateStore(StackGuard, StackGuardSlot);
816
817 for (Instruction *RI : Returns) {
818 IRBuilder<> IRBRet(RI);
819 checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
820 }
821 }
822
823 // The top of the unsafe stack after all unsafe static allocas are
824 // allocated.
825 Value *StaticTop = moveStaticAllocasToUnsafeStack(
826 IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);
827
828 // Safe stack object that stores the current unsafe stack top. It is updated
829 // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
830 // This is only needed if we need to restore stack pointer after longjmp
831 // or exceptions, and we have dynamic allocations.
832 // FIXME: a better alternative might be to store the unsafe stack pointer
833 // before setjmp / invoke instructions.
834 AllocaInst *DynamicTop = createStackRestorePoints(
835 IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
836
837 // Handle dynamic allocas.
838 moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
839 DynamicAllocas);
840
841 // Restore the unsafe stack pointer before each return.
842 for (Instruction *RI : Returns) {
843 IRB.SetInsertPoint(RI);
844 IRB.CreateStore(BasePointer, UnsafeStackPtr);
845 }
846
847 TryInlinePointerAddress();
848
849 LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
850 return true;
851}
852
853class SafeStackLegacyPass : public FunctionPass {
854 const TargetMachine *TM = nullptr;
855
856public:
857 static char ID; // Pass identification, replacement for typeid.
858
859 SafeStackLegacyPass() : FunctionPass(ID) {
860 initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
861 }
862
863 void getAnalysisUsage(AnalysisUsage &AU) const override {
864 AU.addRequired<TargetPassConfig>();
865 AU.addRequired<TargetLibraryInfoWrapperPass>();
866 AU.addRequired<AssumptionCacheTracker>();
867 AU.addPreserved<DominatorTreeWrapperPass>();
868 }
869
870 bool runOnFunction(Function &F) override {
871 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
872
873 if (!F.hasFnAttribute(Attribute::SafeStack)) {
874 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
875 " for this function\n");
876 return false;
877 }
878
879 if (F.isDeclaration()) {
880 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
881 " is not available\n");
882 return false;
883 }
884
885 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
886 auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
887 if (!TL)
888 report_fatal_error("TargetLowering instance is required");
889
890 auto *DL = &F.getParent()->getDataLayout();
891 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
892 auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
893
894 // Compute DT and LI only for functions that have the attribute.
895 // This is only useful because the legacy pass manager doesn't let us
896 // compute analyses lazily.
897
898 DominatorTree *DT;
899 bool ShouldPreserveDominatorTree;
900 std::optional<DominatorTree> LazilyComputedDomTree;
901
902 // Do we already have a DominatorTree available from the previous pass?
903 // Note that we should *NOT* require it, to avoid the case where we end up
904 // not needing it, but the legacy PM would have computed it for us anyways.
905 if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
906 DT = &DTWP->getDomTree();
907 ShouldPreserveDominatorTree = true;
908 } else {
909 // Otherwise, we need to compute it.
910 LazilyComputedDomTree.emplace(F);
911 DT = &*LazilyComputedDomTree;
912 ShouldPreserveDominatorTree = false;
913 }
914
915 // Likewise, lazily compute loop info.
916 LoopInfo LI(*DT);
917
918 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
919
920 ScalarEvolution SE(F, TLI, ACT, *DT, LI);
921
922 return SafeStack(F, *TL, *DL, ShouldPreserveDominatorTree ? &DTU : nullptr,
923 SE)
924 .run();
925 }
926};
927
928} // end anonymous namespace
929
930char SafeStackLegacyPass::ID = 0;
931
933 "Safe Stack instrumentation pass", false, false)
936INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
938
939FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }