SafeStack.cpp
1//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10// and the unsafe stack (explicitly allocated and managed through the runtime
11// support library).
12//
13// http://clang.llvm.org/docs/SafeStack.html
14//
15//===----------------------------------------------------------------------===//
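//
// Illustrative sketch (not from the original file; the names below are the
// conventional SafeStack runtime symbols): for a function carrying the
// `safestack` attribute, an alloca whose address escapes, e.g.
//
//   %buf = alloca [64 x i8]
//   call void @takes_ptr(ptr %buf)      ; address escapes -> unsafe
//
// is rewritten to live at an offset below the unsafe stack pointer, which is
// typically read from the TLS variable __safestack_unsafe_stack_ptr or via
// the __safestack_pointer_address() runtime call, while provably safe locals
// remain ordinary allocas handled by the backend.
//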
16
17#include "llvm/CodeGen/SafeStack.h"
18#include "SafeStackLayout.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/Statistic.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DIBuilder.h"
41#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/MDBuilder.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Use.h"
56#include "llvm/IR/Value.h"
58#include "llvm/Pass.h"
60#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <optional>
71#include <string>
72
73using namespace llvm;
74using namespace llvm::safestack;
75
76#define DEBUG_TYPE "safe-stack"
77
78STATISTIC(NumFunctions, "Total number of functions");
79STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
80STATISTIC(NumUnsafeStackRestorePointsFunctions,
81 "Number of functions that use setjmp or exceptions");
82
83STATISTIC(NumAllocas, "Total number of allocas");
84STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
85STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
86STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
87STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
88
89/// Use __safestack_pointer_address even if the platform has a faster way of
90/// accessing the safe stack pointer.
91static cl::opt<bool>
92 SafeStackUsePointerAddress("safestack-use-pointer-address",
93 cl::init(false), cl::Hidden);
94
95static cl::opt<bool> ClColoring("safe-stack-coloring",
96 cl::desc("enable safe stack coloring"),
97 cl::Hidden, cl::init(true));
98
99namespace {
100
101/// The SafeStack pass splits the stack of each function into the safe
102/// stack, which is only accessed through memory safe dereferences (as
103/// determined statically), and the unsafe stack, which contains all
104/// local variables that are accessed in ways that we can't prove to
105/// be safe.
106class SafeStack {
107 Function &F;
108 const TargetLoweringBase &TL;
109 const LibcallLoweringInfo &Libcalls;
110 const DataLayout &DL;
111 DomTreeUpdater *DTU;
112 ScalarEvolution &SE;
113
114 Type *StackPtrTy;
115 Type *IntPtrTy;
116 Type *Int32Ty;
117
118 Value *UnsafeStackPtr = nullptr;
119
120 /// Unsafe stack alignment. Each stack frame must ensure that the stack is
121 /// aligned to this value. We need to re-align the unsafe stack if the
122 /// alignment of any object on the stack exceeds this value.
123 ///
124 /// 16 seems like a reasonable upper bound on the alignment of objects that we
125 /// might expect to appear on the stack on most common targets.
126 static constexpr Align StackAlignment = Align::Constant<16>();
127
128 /// Return the value of the stack canary.
129 Value *getStackGuard(IRBuilder<> &IRB, Function &F);
130
131 /// Load stack guard from the frame and check if it has changed.
132 void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
133 AllocaInst *StackGuardSlot, Value *StackGuard);
134
135 /// Find all static allocas, dynamic allocas, return instructions and
136 /// stack restore points (exception unwind blocks and setjmp calls) in the
137 /// given function and append them to the respective vectors.
138 void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
139 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
140 SmallVectorImpl<Argument *> &ByValArguments,
141 SmallVectorImpl<Instruction *> &Returns,
142 SmallVectorImpl<Instruction *> &StackRestorePoints);
143
144 /// Calculate the allocation size of a given alloca. Returns 0 if the
145 /// size cannot be statically determined.
146 uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
147
148 /// Allocate space for all static allocas in \p StaticAllocas,
149 /// replace allocas with pointers into the unsafe stack.
150 ///
151 /// \returns A pointer to the top of the unsafe stack after all unsafe static
152 /// allocas are allocated.
153 Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
154 ArrayRef<AllocaInst *> StaticAllocas,
155 ArrayRef<Argument *> ByValArguments,
156 Instruction *BasePointer,
157 AllocaInst *StackGuardSlot);
158
159 /// Generate code to restore the stack after all stack restore points
160 /// in \p StackRestorePoints.
161 ///
162 /// \returns A local variable in which to maintain the dynamic top of the
163 /// unsafe stack if needed.
164 AllocaInst *
165 createStackRestorePoints(IRBuilder<> &IRB, Function &F,
166 ArrayRef<Instruction *> StackRestorePoints,
167 Value *StaticTop, bool NeedDynamicTop);
168
169 /// Replace all allocas in \p DynamicAllocas with code to allocate
170 /// space dynamically on the unsafe stack and store the dynamic unsafe stack
171 /// top to \p DynamicTop if non-null.
172 void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
173 AllocaInst *DynamicTop,
174 ArrayRef<AllocaInst *> DynamicAllocas);
175
176 bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);
177
178 bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
179 const Value *AllocaPtr, uint64_t AllocaSize);
180 bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
181 uint64_t AllocaSize);
182
183 bool ShouldInlinePointerAddress(CallInst &CI);
184 void TryInlinePointerAddress();
185
186public:
187 SafeStack(Function &F, const TargetLoweringBase &TL,
188 const LibcallLoweringInfo &Libcalls, const DataLayout &DL,
189 DomTreeUpdater *DTU, ScalarEvolution &SE)
190 : F(F), TL(TL), Libcalls(Libcalls), DL(DL), DTU(DTU), SE(SE),
191 StackPtrTy(DL.getAllocaPtrType(F.getContext())),
192 IntPtrTy(DL.getIntPtrType(F.getContext())),
193 Int32Ty(Type::getInt32Ty(F.getContext())) {}
194
195 // Run the transformation on the associated function.
196 // Returns whether the function was changed.
197 bool run();
198};
199
200uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
201 if (auto Size = AI->getAllocationSize(DL))
202 if (Size->isFixed())
203 return Size->getFixedValue();
204 return 0;
205}
206
207bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
208 const Value *AllocaPtr, uint64_t AllocaSize) {
209 const SCEV *AddrExpr = SE.getSCEV(Addr);
210 const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AddrExpr));
211 if (!Base || Base->getValue() != AllocaPtr) {
212 LLVM_DEBUG(
213 dbgs() << "[SafeStack] "
214 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
215 << *AllocaPtr << "\n"
216 << "SCEV " << *AddrExpr << " not directly based on alloca\n");
217 return false;
218 }
219
220 const SCEV *Expr = SE.removePointerBase(AddrExpr);
221 uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
222 ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
223 ConstantRange SizeRange =
224 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
225 ConstantRange AccessRange = AccessStartRange.add(SizeRange);
226 ConstantRange AllocaRange =
227 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
228 bool Safe = AllocaRange.contains(AccessRange);
229
230 LLVM_DEBUG(
231 dbgs() << "[SafeStack] "
232 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
233 << *AllocaPtr << "\n"
234 << " Access " << *Addr << "\n"
235 << " SCEV " << *Expr
236 << " U: " << SE.getUnsignedRange(Expr)
237 << ", S: " << SE.getSignedRange(Expr) << "\n"
238 << " Range " << AccessRange << "\n"
239 << " AllocaRange " << AllocaRange << "\n"
240 << " " << (Safe ? "safe" : "unsafe") << "\n");
241
242 return Safe;
243}
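// Worked example (illustrative, not from the original file): for
// "%buf = alloca [16 x i32]" the alloca range is [0, 64) bytes. A 4-byte
// load whose offset SCEV has unsigned range [0, 60] yields an access range
// of [0, 64), which the alloca range contains, so the access is safe. If
// the offset could be 61 or larger, the access range would extend past 64
// bytes and IsAccessSafe would return false.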
244
245bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
246 const Value *AllocaPtr,
247 uint64_t AllocaSize) {
248 if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
249 if (MTI->getRawSource() != U && MTI->getRawDest() != U)
250 return true;
251 } else {
252 if (MI->getRawDest() != U)
253 return true;
254 }
255
256 auto Len = MI->getLengthInBytes();
257 // Non-constant size => unsafe. FIXME: try SCEV getRange.
258 if (!Len) return false;
259 return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
260}
261
262/// Check whether a given allocation must be put on the safe
263/// stack or not. The function analyzes all uses of AI and checks whether it is
264/// only accessed in a memory safe way (as decided statically).
265bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
266 // Go through all uses of this alloca and check whether all accesses to the
267 // allocated object are statically known to be memory safe and, hence, the
268 // object can be placed on the safe stack.
269 SmallPtrSet<const Value *, 16> Visited;
270 SmallVector<const Value *, 8> WorkList;
271 WorkList.push_back(AllocaPtr);
272
273 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
274 while (!WorkList.empty()) {
275 const Value *V = WorkList.pop_back_val();
276 for (const Use &UI : V->uses()) {
277 auto I = cast<const Instruction>(UI.getUser());
278 assert(V == UI.get());
279
280 switch (I->getOpcode()) {
281 case Instruction::Load:
282 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
283 AllocaSize))
284 return false;
285 break;
286
287 case Instruction::VAArg:
288 // "va-arg" from a pointer is safe.
289 break;
290 case Instruction::Store:
291 if (V == I->getOperand(0)) {
292 // Stored the pointer - conservatively assume it may be unsafe.
293 LLVM_DEBUG(dbgs()
294 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
295 << "\n store of address: " << *I << "\n");
296 return false;
297 }
298
299 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
300 AllocaPtr, AllocaSize))
301 return false;
302 break;
303
304 case Instruction::Ret:
305 // Information leak.
306 return false;
307
308 case Instruction::Call:
309 case Instruction::Invoke: {
310 const CallBase &CS = *cast<CallBase>(I);
311
312 if (I->isLifetimeStartOrEnd())
313 continue;
314
315 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
316 if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
317 LLVM_DEBUG(dbgs()
318 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
319 << "\n unsafe memintrinsic: " << *I << "\n");
320 return false;
321 }
322 continue;
323 }
324
325 // LLVM 'nocapture' attribute is only set for arguments whose address
326 // is not stored, passed around, or used in any other non-trivial way.
327 // We assume that passing a pointer to an object as a 'nocapture
328 // readnone' argument is safe.
329 // FIXME: a more precise solution would require an interprocedural
330 // analysis here, which would look at all uses of an argument inside
331 // the function being called.
332 auto B = CS.arg_begin(), E = CS.arg_end();
333 for (const auto *A = B; A != E; ++A)
334 if (A->get() == V)
335 if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
336 CS.doesNotAccessMemory()))) {
337 LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
338 << "\n unsafe call: " << *I << "\n");
339 return false;
340 }
341 continue;
342 }
343
344 default:
345 if (Visited.insert(I).second)
346 WorkList.push_back(cast<const Instruction>(I));
347 }
348 }
349 }
350
351 // All uses of the alloca are safe, we can place it on the safe stack.
352 return true;
353}
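// Typical reasons an alloca ends up unsafe under the rules above
// (illustrative summary): its address is stored to memory or returned, it is
// passed to a call that may capture or access the pointee, or some load or
// store offset cannot be proven in-bounds by IsAccessSafe. Allocas whose
// every use is a provably in-bounds load or store stay on the safe stack.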
354
355Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
356 Value *StackGuardVar = TL.getIRStackGuard(IRB, Libcalls);
357 Module *M = F.getParent();
358
359 if (!StackGuardVar) {
360 TL.insertSSPDeclarations(*M, Libcalls);
361 return IRB.CreateIntrinsic(Intrinsic::stackguard, {});
362 }
363
364 return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
365}
366
367void SafeStack::findInsts(Function &F,
368 SmallVectorImpl<AllocaInst *> &StaticAllocas,
369 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
370 SmallVectorImpl<Argument *> &ByValArguments,
371 SmallVectorImpl<Instruction *> &Returns,
372 SmallVectorImpl<Instruction *> &StackRestorePoints) {
373 for (Instruction &I : instructions(&F)) {
374 if (auto AI = dyn_cast<AllocaInst>(&I)) {
375 ++NumAllocas;
376
377 uint64_t Size = getStaticAllocaAllocationSize(AI);
378 if (IsSafeStackAlloca(AI, Size))
379 continue;
380
381 if (AI->isStaticAlloca()) {
382 ++NumUnsafeStaticAllocas;
383 StaticAllocas.push_back(AI);
384 } else {
385 ++NumUnsafeDynamicAllocas;
386 DynamicAllocas.push_back(AI);
387 }
388 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
389 if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
390 Returns.push_back(CI);
391 else
392 Returns.push_back(RI);
393 } else if (auto CI = dyn_cast<CallInst>(&I)) {
394 // setjmps require stack restore.
395 if (CI->getCalledFunction() && CI->canReturnTwice())
396 StackRestorePoints.push_back(CI);
397 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
398 // Exception landing pads require stack restore.
399 StackRestorePoints.push_back(LP);
400 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
401 if (II->getIntrinsicID() == Intrinsic::gcroot)
403 "gcroot intrinsic not compatible with safestack attribute");
404 }
405 }
406 for (Argument &Arg : F.args()) {
407 if (!Arg.hasByValAttr())
408 continue;
409 uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
410 if (IsSafeStackAlloca(&Arg, Size))
411 continue;
412
413 ++NumUnsafeByValArguments;
414 ByValArguments.push_back(&Arg);
415 }
416}
417
418AllocaInst *
419SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
420 ArrayRef<Instruction *> StackRestorePoints,
421 Value *StaticTop, bool NeedDynamicTop) {
422 assert(StaticTop && "The stack top isn't set.");
423
424 if (StackRestorePoints.empty())
425 return nullptr;
426
427 // We need the current value of the shadow stack pointer to restore
428 // after longjmp or exception catching.
429
430 // FIXME: On some platforms this could be handled by the longjmp/exception
431 // runtime itself.
432
433 AllocaInst *DynamicTop = nullptr;
434 if (NeedDynamicTop) {
435 // If we also have dynamic alloca's, the stack pointer value changes
436 // throughout the function. For now we store it in an alloca.
437 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
438 "unsafe_stack_dynamic_ptr");
439 IRB.CreateStore(StaticTop, DynamicTop);
440 }
441
442 // Restore current stack pointer after longjmp/exception catch.
443 for (Instruction *I : StackRestorePoints) {
444 ++NumUnsafeStackRestorePoints;
445
446 IRB.SetInsertPoint(I->getNextNode());
447 Value *CurrentTop =
448 DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
449 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
450 }
451
452 return DynamicTop;
453}
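// Illustrative sketch of the code emitted here, assuming UnsafeStackPtr is
// the TLS variable @__safestack_unsafe_stack_ptr and there are no dynamic
// allocas:
//
//   %r = call i32 @setjmp(ptr %env)                        ; returns_twice
//   store ptr %static_top, ptr @__safestack_unsafe_stack_ptr
//
// With dynamic allocas present, the stored value is instead reloaded from
// the unsafe_stack_dynamic_ptr slot created above, so a longjmp back into
// this frame observes a consistent unsafe stack top.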
454
455void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
456 AllocaInst *StackGuardSlot, Value *StackGuard) {
457 Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
458 Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
459
460 auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
461 auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false);
462 MDNode *Weights = MDBuilder(F.getContext())
463 .createBranchWeights(SuccessProb.getNumerator(),
464 FailureProb.getNumerator());
465 Instruction *CheckTerm =
466 SplitBlockAndInsertIfThen(Cmp, &RI, /* Unreachable */ true, Weights, DTU);
467 IRBuilder<> IRBFail(CheckTerm);
468 // FIXME: respect -fsanitize-trap / -ftrap-function here?
469 RTLIB::LibcallImpl StackChkFailImpl =
470 Libcalls.getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
471 if (StackChkFailImpl == RTLIB::Unsupported) {
472 F.getContext().emitError(
473 "no libcall available for stackprotector check fail");
474 return;
475 }
476
477 StringRef StackChkFailName =
478 RTLIB::RuntimeLibcallsInfo::getLibcallImplName(StackChkFailImpl);
479
480 FunctionCallee StackChkFail =
481 F.getParent()->getOrInsertFunction(StackChkFailName, IRB.getVoidTy());
482 IRBFail.CreateCall(StackChkFail, {});
483}
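// Illustrative sketch of the control flow produced above around a return
// (the failure callee is whatever the libcall lowering selects, commonly
// __stack_chk_fail):
//
//   %v   = load ptr, ptr %StackGuardSlot
//   %cmp = icmp ne ptr %StackGuard, %v
//   br i1 %cmp, label %fail, label %cont   ; weighted as highly unlikely
// fail:
//   call void @__stack_chk_fail()
//   unreachable
// cont:
//   ret ...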
484
485/// We explicitly compute and set the unsafe stack layout for all unsafe
486/// static alloca instructions. We save the unsafe "base pointer" in the
487/// prologue into a local variable and restore it in the epilogue.
488Value *SafeStack::moveStaticAllocasToUnsafeStack(
489 IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
490 ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
491 AllocaInst *StackGuardSlot) {
492 if (StaticAllocas.empty() && ByValArguments.empty())
493 return BasePointer;
494
495 DIBuilder DIB(*F.getParent());
496
497 StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
498 static const StackLifetime::LiveRange NoColoringRange(1, true);
499 if (ClColoring)
500 SSC.run();
501
502 for (const auto *I : SSC.getMarkers()) {
503 auto *Op = dyn_cast<Instruction>(I->getOperand(1));
504 const_cast<IntrinsicInst *>(I)->eraseFromParent();
505 // Remove the operand bitcast, too, if it has no more uses left.
506 if (Op && Op->use_empty())
507 Op->eraseFromParent();
508 }
509
510 // Unsafe stack always grows down.
511 StackLayout SSL(StackAlignment);
512 if (StackGuardSlot) {
513 SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
514 StackGuardSlot->getAlign(), SSC.getFullLiveRange());
515 }
516
517 for (Argument *Arg : ByValArguments) {
518 Type *Ty = Arg->getParamByValType();
519 uint64_t Size = DL.getTypeStoreSize(Ty);
520 if (Size == 0)
521 Size = 1; // Don't create zero-sized stack objects.
522
523 // Ensure the object is properly aligned.
524 Align Align = DL.getPrefTypeAlign(Ty);
525 if (auto A = Arg->getParamAlign())
526 Align = std::max(Align, *A);
527 SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
528 }
529
530 for (AllocaInst *AI : StaticAllocas) {
531 uint64_t Size = getStaticAllocaAllocationSize(AI);
532 if (Size == 0)
533 Size = 1; // Don't create zero-sized stack objects.
534
535 SSL.addObject(AI, Size, AI->getAlign(),
536 ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
537 }
538
539 SSL.computeLayout();
540 Align FrameAlignment = SSL.getFrameAlignment();
541
542 // FIXME: tell SSL that we start at a less-than-MaxAlignment aligned location
543 // (AlignmentSkew).
544 if (FrameAlignment > StackAlignment) {
545 // Re-align the base pointer according to the max requested alignment.
546 IRB.SetInsertPoint(BasePointer->getNextNode());
547 BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
548 IRB.CreateAnd(
549 IRB.CreatePtrToInt(BasePointer, IntPtrTy),
550 ConstantInt::get(IntPtrTy, ~(FrameAlignment.value() - 1))),
551 StackPtrTy));
552 }
553
554 IRB.SetInsertPoint(BasePointer->getNextNode());
555
556 if (StackGuardSlot) {
557 unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
558 Value *Off =
559 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
560 Value *NewAI =
561 IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");
562
563 // Replace alloc with the new location.
564 StackGuardSlot->replaceAllUsesWith(NewAI);
565 StackGuardSlot->eraseFromParent();
566 }
567
568 for (Argument *Arg : ByValArguments) {
569 unsigned Offset = SSL.getObjectOffset(Arg);
570 MaybeAlign Align(SSL.getObjectAlignment(Arg));
571 Type *Ty = Arg->getParamByValType();
572
573 uint64_t Size = DL.getTypeStoreSize(Ty);
574 if (Size == 0)
575 Size = 1; // Don't create zero-sized stack objects.
576
577 Value *Off =
578 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
579 Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
580 Arg->getName() + ".unsafe-byval");
581
582 // Replace alloc with the new location.
583 replaceDbgDeclare(Arg, BasePointer, DIB, DIExpression::ApplyOffset,
584 -Offset);
585 Arg->replaceAllUsesWith(NewArg);
586 IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
587 IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
588 }
589
590 // Allocate space for every unsafe static AllocaInst on the unsafe stack.
591 for (AllocaInst *AI : StaticAllocas) {
592 IRB.SetInsertPoint(AI);
593 unsigned Offset = SSL.getObjectOffset(AI);
594
595 replaceDbgDeclare(AI, BasePointer, DIB, DIExpression::ApplyOffset, -Offset);
596 replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
597
598 // Replace uses of the alloca with the new location.
599 // Insert address calculation close to each use to work around PR27844.
600 std::string Name = std::string(AI->getName()) + ".unsafe";
601 while (!AI->use_empty()) {
602 Use &U = *AI->use_begin();
603 Instruction *User = cast<Instruction>(U.getUser());
604
605 // Drop lifetime markers now that this is no longer an alloca.
606 // SafeStack has already performed its own stack coloring.
607 if (User->isLifetimeStartOrEnd()) {
608 User->eraseFromParent();
609 continue;
610 }
611
612 Instruction *InsertBefore;
613 if (auto *PHI = dyn_cast<PHINode>(User))
614 InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
615 else
616 InsertBefore = User;
617
618 IRBuilder<> IRBUser(InsertBefore);
619 Value *Off =
620 IRBUser.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
621 Value *Replacement =
622 IRBUser.CreateAddrSpaceCast(Off, AI->getType(), Name);
623
624 if (auto *PHI = dyn_cast<PHINode>(User))
625 // PHI nodes may have multiple incoming edges from the same BB (why??),
626 // all must be updated at once with the same incoming value.
627 PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
628 else
629 U.set(Replacement);
630 }
631
632 AI->eraseFromParent();
633 }
634
635 // Re-align BasePointer so that our callees would see it aligned as
636 // expected.
637 // FIXME: no need to update BasePointer in leaf functions.
638 unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
639
640 MDBuilder MDB(F.getContext());
641 SmallVector<Metadata *, 2> Data;
642 Data.push_back(MDB.createString("unsafe-stack-size"));
643 Data.push_back(MDB.createConstant(ConstantInt::get(Int32Ty, FrameSize)));
644 MDNode *MD = MDTuple::get(F.getContext(), Data);
645 F.setMetadata(LLVMContext::MD_annotation, MD);
646
647 // Update shadow stack pointer in the function epilogue.
648 IRB.SetInsertPoint(BasePointer->getNextNode());
649
650 Value *StaticTop =
651 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
652 "unsafe_stack_static_top");
653 IRB.CreateStore(StaticTop, UnsafeStackPtr);
654 return StaticTop;
655}
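// Illustrative frame-layout example (hypothetical offsets, assuming the TLS
// form of the unsafe stack pointer): with a 64-byte buffer placed at offset
// 64 and an 8-byte object at offset 72, the frame size rounds up to 80 and
// the rewritten prologue looks roughly like:
//
//   %unsafe_stack_ptr = load ptr, ptr @__safestack_unsafe_stack_ptr
//   %buf = getelementptr i8, ptr %unsafe_stack_ptr, i32 -64
//   %obj = getelementptr i8, ptr %unsafe_stack_ptr, i32 -72
//   %unsafe_stack_static_top = getelementptr i8, ptr %unsafe_stack_ptr, i32 -80
//   store ptr %unsafe_stack_static_top, ptr @__safestack_unsafe_stack_ptr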
656
657void SafeStack::moveDynamicAllocasToUnsafeStack(
658 Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
659 ArrayRef<AllocaInst *> DynamicAllocas) {
660 DIBuilder DIB(*F.getParent());
661
662 for (AllocaInst *AI : DynamicAllocas) {
663 IRBuilder<> IRB(AI);
664
665 // Compute the new SP value (after AI).
666 Value *Size = IRB.CreateAllocationSize(IntPtrTy, AI);
667 Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
668 IntPtrTy);
669 SP = IRB.CreateSub(SP, Size);
670
671 // Align the SP value to satisfy the AllocaInst and stack alignments.
672 auto Align = std::max(AI->getAlign(), StackAlignment);
673
674 Value *NewTop = IRB.CreateIntToPtr(
675 IRB.CreateAnd(
676 SP, ConstantInt::getSigned(IntPtrTy, ~uint64_t(Align.value() - 1))),
677 StackPtrTy);
678
679 // Save the stack pointer.
680 IRB.CreateStore(NewTop, UnsafeStackPtr);
681 if (DynamicTop)
682 IRB.CreateStore(NewTop, DynamicTop);
683
684 Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
685 if (AI->hasName() && isa<Instruction>(NewAI))
686 NewAI->takeName(AI);
687
688 replaceDbgDeclare(AI, NewAI, DIB, DIExpression::ApplyOffset, 0);
689 AI->replaceAllUsesWith(NewAI);
690 AI->eraseFromParent();
691 }
692
693 if (!DynamicAllocas.empty()) {
694 // Now go through the instructions again, replacing stacksave/stackrestore.
695 for (Instruction &I : llvm::make_early_inc_range(instructions(&F))) {
696 auto *II = dyn_cast<IntrinsicInst>(&I);
697 if (!II)
698 continue;
699
700 if (II->getIntrinsicID() == Intrinsic::stacksave) {
701 IRBuilder<> IRB(II);
702 Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
703 LI->takeName(II);
704 II->replaceAllUsesWith(LI);
705 II->eraseFromParent();
706 } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
707 IRBuilder<> IRB(II);
708 Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
709 SI->takeName(II);
710 assert(II->use_empty());
711 II->eraseFromParent();
712 }
713 }
714 }
715}
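// Arithmetic example for the re-alignment above (hypothetical values): with
// a dynamic request of Size = 40 bytes, Align = 16, and a current unsafe
// stack top of 0x7f0010, the new top is (0x7f0010 - 40) & ~15 =
// 0x7effe8 & ~15 = 0x7effe0, which is stored back to the unsafe stack
// pointer (and to DynamicTop, if present) and used as the alloca's address.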
716
717bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
718 Function *Callee = CI.getCalledFunction();
719 if (CI.hasFnAttr(Attribute::AlwaysInline) &&
720 isInlineViable(*Callee).isSuccess())
721 return true;
722 if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
723 CI.isNoInline())
724 return false;
725 return true;
726}
727
728void SafeStack::TryInlinePointerAddress() {
729 auto *CI = dyn_cast<CallInst>(UnsafeStackPtr);
730 if (!CI)
731 return;
732
733 if (F.hasOptNone())
734 return;
735
736 Function *Callee = CI->getCalledFunction();
737 if (!Callee || Callee->isDeclaration())
738 return;
739
740 if (!ShouldInlinePointerAddress(*CI))
741 return;
742
743 InlineFunctionInfo IFI;
744 InlineFunction(*CI, IFI);
745}
746
747bool SafeStack::run() {
748 assert(F.hasFnAttribute(Attribute::SafeStack) &&
749 "Can't run SafeStack on a function without the attribute");
750 assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");
751
752 ++NumFunctions;
753
754 SmallVector<AllocaInst *, 16> StaticAllocas;
755 SmallVector<AllocaInst *, 4> DynamicAllocas;
756 SmallVector<Argument *, 4> ByValArguments;
757 SmallVector<Instruction *, 4> Returns;
758
759 // Collect all points where the stack gets unwound and needs to be restored.
760 // This is only necessary because the runtime (setjmp and unwind code) is
761 // not aware of the unsafe stack and won't unwind/restore it properly.
762 // To work around this problem without changing the runtime, we insert
763 // instrumentation to restore the unsafe stack pointer when necessary.
764 SmallVector<Instruction *, 4> StackRestorePoints;
765
766 // Find all static and dynamic alloca instructions that must be moved to the
767 // unsafe stack, all return instructions and stack restore points.
768 findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
769 StackRestorePoints);
770
771 if (StaticAllocas.empty() && DynamicAllocas.empty() &&
772 ByValArguments.empty() && StackRestorePoints.empty())
773 return false; // Nothing to do in this function.
774
775 if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
776 !ByValArguments.empty())
777 ++NumUnsafeStackFunctions; // This function has the unsafe stack.
778
779 if (!StackRestorePoints.empty())
780 ++NumUnsafeStackRestorePointsFunctions;
781
782 IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
783 // Calls must always have a debug location, or else inlining breaks. So
784 // we explicitly set an artificial debug location here.
785 if (DISubprogram *SP = F.getSubprogram())
786 IRB.SetCurrentDebugLocation(
787 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP));
788 if (SafeStackUsePointerAddress) {
789 RTLIB::LibcallImpl SafestackPointerAddressImpl =
790 Libcalls.getLibcallImpl(RTLIB::SAFESTACK_POINTER_ADDRESS);
791 if (SafestackPointerAddressImpl == RTLIB::Unsupported) {
792 F.getContext().emitError(
793 "no libcall available for safestack pointer address");
794 return false;
795 }
796
797 StringRef SafestackPointerAddressName =
798 RTLIB::RuntimeLibcallsInfo::getLibcallImplName(
799 SafestackPointerAddressImpl);
800
801 FunctionCallee Fn = F.getParent()->getOrInsertFunction(
802 SafestackPointerAddressName, IRB.getPtrTy(0));
803 UnsafeStackPtr = IRB.CreateCall(Fn);
804 } else {
805 UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB, Libcalls);
806 }
807
808 // Load the current stack pointer (we'll also use it as a base pointer).
809 // FIXME: use a dedicated register for it ?
810 Instruction *BasePointer =
811 IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
812 assert(BasePointer->getType() == StackPtrTy);
813
814 AllocaInst *StackGuardSlot = nullptr;
815 // FIXME: implement weaker forms of stack protector.
816 if (F.hasFnAttribute(Attribute::StackProtect) ||
817 F.hasFnAttribute(Attribute::StackProtectStrong) ||
818 F.hasFnAttribute(Attribute::StackProtectReq)) {
819 Value *StackGuard = getStackGuard(IRB, F);
820 StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
821 IRB.CreateStore(StackGuard, StackGuardSlot);
822
823 for (Instruction *RI : Returns) {
824 IRBuilder<> IRBRet(RI);
825 checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
826 }
827 }
828
829 // The top of the unsafe stack after all unsafe static allocas are
830 // allocated.
831 Value *StaticTop = moveStaticAllocasToUnsafeStack(
832 IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);
833
834 // Safe stack object that stores the current unsafe stack top. It is updated
835 // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
836 // This is only needed if we need to restore stack pointer after longjmp
837 // or exceptions, and we have dynamic allocations.
838 // FIXME: a better alternative might be to store the unsafe stack pointer
839 // before setjmp / invoke instructions.
840 AllocaInst *DynamicTop = createStackRestorePoints(
841 IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
842
843 // Handle dynamic allocas.
844 moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
845 DynamicAllocas);
846
847 // Restore the unsafe stack pointer before each return.
848 for (Instruction *RI : Returns) {
849 IRB.SetInsertPoint(RI);
850 IRB.CreateStore(BasePointer, UnsafeStackPtr);
851 }
852
853 TryInlinePointerAddress();
854
855 LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
856 return true;
857}
858
859class SafeStackLegacyPass : public FunctionPass {
860 const TargetMachine *TM = nullptr;
861
862public:
863 static char ID; // Pass identification, replacement for typeid.
864
865 SafeStackLegacyPass() : FunctionPass(ID) {}
866
867 void getAnalysisUsage(AnalysisUsage &AU) const override {
868 AU.addRequired<LibcallLoweringInfoWrapper>();
869 AU.addRequired<TargetPassConfig>();
870 AU.addRequired<TargetLibraryInfoWrapperPass>();
871 AU.addRequired<AssumptionCacheTracker>();
872 AU.addPreserved<DominatorTreeWrapperPass>();
873 }
874
875 bool runOnFunction(Function &F) override {
876 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
877
878 if (!F.hasFnAttribute(Attribute::SafeStack)) {
879 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
880 " for this function\n");
881 return false;
882 }
883
884 if (F.isDeclaration()) {
885 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
886 " is not available\n");
887 return false;
888 }
889
890 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
891 const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
892 auto *TL = Subtarget->getTargetLowering();
893 if (!TL)
894 report_fatal_error("TargetLowering instance is required");
895
896 const LibcallLoweringInfo &Libcalls =
897 getAnalysis<LibcallLoweringInfoWrapper>().getLibcallLowering(
898 *F.getParent(), *Subtarget);
899
900 auto *DL = &F.getDataLayout();
901 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
902 auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
903
904 // Compute DT and LI only for functions that have the attribute.
905 // This is only useful because the legacy pass manager doesn't let us
906 // compute analyses lazily.
907
908 DominatorTree *DT;
909 bool ShouldPreserveDominatorTree;
910 std::optional<DominatorTree> LazilyComputedDomTree;
911
912 // Do we already have a DominatorTree available from the previous pass?
913 // Note that we should *NOT* require it, to avoid the case where we end up
914 // not needing it, but the legacy PM would have computed it for us anyways.
915 if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
916 DT = &DTWP->getDomTree();
917 ShouldPreserveDominatorTree = true;
918 } else {
919 // Otherwise, we need to compute it.
920 LazilyComputedDomTree.emplace(F);
921 DT = &*LazilyComputedDomTree;
922 ShouldPreserveDominatorTree = false;
923 }
924
925 // Likewise, lazily compute loop info.
926 LoopInfo LI(*DT);
927
928 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
929
930 ScalarEvolution SE(F, TLI, ACT, *DT, LI);
931
932 return SafeStack(F, *TL, Libcalls, *DL,
933 ShouldPreserveDominatorTree ? &DTU : nullptr, SE)
934 .run();
935 }
936};
937
938} // end anonymous namespace
939
940PreservedAnalyses SafeStackPass::run(Function &F,
941 FunctionAnalysisManager &FAM) {
942 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
943
944 if (!F.hasFnAttribute(Attribute::SafeStack)) {
945 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
946 " for this function\n");
947 return PreservedAnalyses::all();
948 }
949
950 if (F.isDeclaration()) {
951 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
952 " is not available\n");
953 return PreservedAnalyses::all();
954 }
955
956 const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
957 auto *TL = Subtarget->getTargetLowering();
958
959 auto &DL = F.getDataLayout();
960
961 // preserve DominatorTree
962 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
963 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
964
965 auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
966 const LibcallLoweringModuleAnalysisResult *LibcallLowering =
967 MAMProxy.getCachedResult<LibcallLoweringModuleAnalysis>(*F.getParent());
968
969 if (!LibcallLowering) {
970 F.getContext().emitError("'" + LibcallLoweringModuleAnalysis::name() +
971 "' analysis required");
972 return PreservedAnalyses::all();
973 }
974
975 const LibcallLoweringInfo &Libcalls =
976 LibcallLowering->getLibcallLowering(*Subtarget);
977
978 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
979
980 bool Changed = SafeStack(F, *TL, Libcalls, DL, &DTU, SE).run();
981
982 if (!Changed)
983 return PreservedAnalyses::all();
984 PreservedAnalyses PA;
985 PA.preserve<DominatorTreeAnalysis>();
986 return PA;
987}
988
989char SafeStackLegacyPass::ID = 0;
990
992 "Safe Stack instrumentation pass", false, false)
996INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
997 "Safe Stack instrumentation pass", false, false)
998
999FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }