//===- Evaluator.cpp - LLVM IR evaluator ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Function evaluator for LLVM IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "evaluator"

using namespace llvm;

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL);

/// Return true if the specified constant can be handled by the code generator.
/// We don't want to generate something like:
///   void *X = &X/42;
/// because the code generator doesn't have a relocation that can handle that.
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
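///
/// For example, a plain global address (&GV), a bitcast of &GV, or a
/// getelementptr on &GV whose indices are all ConstantInts is simple enough
/// to commit, while a constant expression whose offsets are not constant
/// integers is rejected.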
static bool
isSimpleEnoughValueToCommitHelper(Constant *C,
                                  SmallPtrSetImpl<Constant *> &SimpleConstants,
                                  const DataLayout &DL) {
  // Simple global addresses are supported, do not allow dllimport or
  // thread-local globals.
  if (auto *GV = dyn_cast<GlobalValue>(C))
    return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();

  // Simple integer, undef, constant aggregate zero, etc are all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantAggregate>(C)) {
    for (Value *Op : C->operands())
      if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
        return false;
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
    // Bitcast is fine if the casted value is fine.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // int <=> ptr is fine if the int type is the same size as the
    // pointer type.
    if (DL.getTypeSizeInBits(CE->getType()) !=
        DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  }
  return false;
}

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C).second)
    return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}

void Evaluator::MutableValue::clear() {
  if (auto *Agg = dyn_cast_if_present<MutableAggregate *>(Val))
    delete Agg;
  Val = nullptr;
}

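/// Read a value of type \p Ty at \p Offset from this (possibly mutated)
/// memory image, descending through any MutableAggregate levels until a plain
/// Constant is reached. Returns null if the offset cannot be mapped onto an
/// element or the read would not fit inside it.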
Constant *Evaluator::MutableValue::read(Type *Ty, APInt Offset,
                                        const DataLayout &DL) const {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  const MutableValue *V = this;
  while (const auto *Agg = dyn_cast_if_present<MutableAggregate *>(V->Val)) {
    Type *AggTy = Agg->Ty;
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return nullptr;

    V = &Agg->Elements[Index->getZExtValue()];
  }

  return ConstantFoldLoadFromConst(cast<Constant *>(V->Val), Ty, Offset, DL);
}

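/// Split a plain Constant element into a MutableAggregate so that individual
/// elements can be overwritten. Only fixed vectors, arrays and structs can be
/// made mutable this way; for any other type the call fails.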
bool Evaluator::MutableValue::makeMutable() {
  Constant *C = cast<Constant *>(Val);
  Type *Ty = C->getType();
  unsigned NumElements;
  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
    NumElements = VT->getNumElements();
  } else if (auto *AT = dyn_cast<ArrayType>(Ty))
    NumElements = AT->getNumElements();
  else if (auto *ST = dyn_cast<StructType>(Ty))
    NumElements = ST->getNumElements();
  else
    return false;

  MutableAggregate *MA = new MutableAggregate(Ty);
  MA->Elements.reserve(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    MA->Elements.push_back(C->getAggregateElement(I));
  Val = MA;
  return true;
}

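/// Write the constant \p V at \p Offset into this memory image, splitting
/// enclosing constants into MutableAggregates as needed until the target
/// element is reached. Returns false if the store cannot be represented.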
bool Evaluator::MutableValue::write(Constant *V, APInt Offset,
                                    const DataLayout &DL) {
  Type *Ty = V->getType();
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  MutableValue *MV = this;
  while (Offset != 0 ||
         !CastInst::isBitOrNoopPointerCastable(Ty, MV->getType(), DL)) {
    if (isa<Constant *>(MV->Val) && !MV->makeMutable())
      return false;

    MutableAggregate *Agg = cast<MutableAggregate *>(MV->Val);
    Type *AggTy = Agg->Ty;
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return false;

    MV = &Agg->Elements[Index->getZExtValue()];
  }

  Type *MVType = MV->getType();
  MV->clear();
  if (Ty->isIntegerTy() && MVType->isPointerTy())
    MV->Val = ConstantExpr::getIntToPtr(V, MVType);
  else if (Ty->isPointerTy() && MVType->isIntegerTy())
    MV->Val = ConstantExpr::getPtrToInt(V, MVType);
  else if (Ty != MVType)
    MV->Val = ConstantExpr::getBitCast(V, MVType);
  else
    MV->Val = V;
  return true;
}

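/// Flatten this MutableAggregate (and, recursively, any mutable elements it
/// contains) back into a single Constant of the aggregate's type.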
Constant *Evaluator::MutableAggregate::toConstant() const {
  SmallVector<Constant *, 32> Consts;
  for (const MutableValue &MV : Elements)
    Consts.push_back(MV.toConstant());

  if (auto *ST = dyn_cast<StructType>(Ty))
    return ConstantStruct::get(ST, Consts);
  if (auto *AT = dyn_cast<ArrayType>(Ty))
    return ConstantArray::get(AT, Consts);
  assert(isa<FixedVectorType>(Ty) && "Must be vector");
  return ConstantVector::get(Consts);
}

/// Return the value that would be computed by a load from P after the stores
/// reflected by 'memory' have been performed. If we can't decide, return null.
Constant *Evaluator::ComputeLoadResult(Constant *P, Type *Ty) {
  APInt Offset(DL.getIndexTypeSizeInBits(P->getType()), 0);
  P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));
  Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
  if (auto *GV = dyn_cast<GlobalVariable>(P))
    return ComputeLoadResult(GV, Ty, Offset);
  return nullptr;
}

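/// As above, but for a load of type \p Ty at a constant \p Offset into the
/// global \p GV. Memory mutated by earlier evaluated stores takes precedence
/// over the global's original initializer.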
Constant *Evaluator::ComputeLoadResult(GlobalVariable *GV, Type *Ty,
                                       const APInt &Offset) {
  auto It = MutatedMemory.find(GV);
  if (It != MutatedMemory.end())
    return It->second.read(Ty, Offset, DL);

  if (!GV->hasDefinitiveInitializer())
    return nullptr;
  return ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
}

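/// Return the function that \p C refers to, looking through a direct function
/// reference or a global alias whose aliasee is a function; otherwise null.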
static Function *getFunction(Constant *C) {
  if (auto *Fn = dyn_cast<Function>(C))
    return Fn;

  if (auto *Alias = dyn_cast<GlobalAlias>(C))
    if (auto *Fn = dyn_cast<Function>(Alias->getAliasee()))
      return Fn;
  return nullptr;
}

Function *
Evaluator::getCalleeWithFormalArgs(CallBase &CB,
                                   SmallVectorImpl<Constant *> &Formals) {
  auto *V = CB.getCalledOperand()->stripPointerCasts();
  if (auto *Fn = getFunction(getVal(V)))
    return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
  return nullptr;
}

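/// Cast each actual call argument to the corresponding formal parameter type
/// of \p F, appending the results to \p Formals. Fails if an argument cannot
/// be converted or if fewer arguments are supplied than \p F expects.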
bool Evaluator::getFormalParams(CallBase &CB, Function *F,
                                SmallVectorImpl<Constant *> &Formals) {
  if (!F)
    return false;

  auto *FTy = F->getFunctionType();
  if (FTy->getNumParams() > CB.arg_size()) {
    LLVM_DEBUG(dbgs() << "Too few arguments for function.\n");
    return false;
  }

  auto ArgI = CB.arg_begin();
  for (Type *PTy : FTy->params()) {
    auto *ArgC = ConstantFoldLoadThroughBitcast(getVal(*ArgI), PTy, DL);
    if (!ArgC) {
      LLVM_DEBUG(dbgs() << "Can not convert function argument.\n");
      return false;
    }
    Formals.push_back(ArgC);
    ++ArgI;
  }
  return true;
}

/// If the call expression contains a bitcast, we may need to cast the
/// evaluated return value to the type of the call expression.
Constant *Evaluator::castCallResultIfNeeded(Type *ReturnType, Constant *RV) {
  if (!RV || RV->getType() == ReturnType)
    return RV;

  RV = ConstantFoldLoadThroughBitcast(RV, ReturnType, DL);
  if (!RV)
    LLVM_DEBUG(dbgs() << "Failed to fold bitcast call expr\n");
  return RV;
}

/// Evaluate all instructions in block BB, returning true if successful, false
/// if we can't evaluate it. NextBB returns the next BB that control flows
/// into, or null upon return. StrippedPointerCastsForAliasAnalysis is set to
/// true if we looked through pointer casts to evaluate something.
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
                              bool &StrippedPointerCastsForAliasAnalysis) {
  // This is the main evaluation loop.
  while (true) {
    Constant *InstResult = nullptr;

    LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (SI->isVolatile()) {
        LLVM_DEBUG(dbgs() << "Store is volatile! Can not evaluate.\n");
        return false; // no volatile accesses.
      }
      Constant *Ptr = getVal(SI->getOperand(1));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
      }

      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
          DL, Offset, /* AllowNonInbounds */ true));
      Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(Ptr->getType()));
      auto *GV = dyn_cast<GlobalVariable>(Ptr);
      if (!GV || !GV->hasUniqueInitializer()) {
        LLVM_DEBUG(dbgs() << "Store is not to global with unique initializer: "
                          << *Ptr << "\n");
        return false;
      }

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      Constant *Val = getVal(SI->getOperand(0));
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
        LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
                          << *Val << "\n");
        return false;
      }

      auto Res = MutatedMemory.try_emplace(GV, GV->getInitializer());
      if (!Res.first->second.write(Val, Offset, DL))
        return false;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (LI->isVolatile()) {
        LLVM_DEBUG(
            dbgs() << "Found a Load! Volatile load, can not evaluate.\n");
        return false; // no volatile accesses.
      }

      Constant *Ptr = getVal(LI->getOperand(0));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant "
                             "folding: "
                          << *Ptr << "\n");
      }
      InstResult = ComputeLoadResult(Ptr, LI->getType());
      if (!InstResult) {
        LLVM_DEBUG(
            dbgs() << "Failed to compute load result. Can not evaluate load."
                      "\n");
        return false; // Could not evaluate load.
      }

      LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) {
        LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
        return false; // Cannot handle array allocs.
      }
      Type *Ty = AI->getAllocatedType();
      AllocaTmps.push_back(std::make_unique<GlobalVariable>(
          Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
          AI->getName(), /*TLMode=*/GlobalValue::NotThreadLocal,
          AI->getType()->getPointerAddressSpace()));
      InstResult = AllocaTmps.back().get();
      LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallBase &CB = *cast<CallBase>(&*CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CB)) {
        LLVM_DEBUG(dbgs() << "Ignoring debug info.\n");
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (CB.isInlineAsm()) {
        LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CB)) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) {
            LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset "
                              << "intrinsic.\n");
            return false;
          }

          auto *LenC = dyn_cast<ConstantInt>(getVal(MSI->getLength()));
          if (!LenC) {
            LLVM_DEBUG(dbgs() << "Memset with unknown length.\n");
            return false;
          }

          Constant *Ptr = getVal(MSI->getDest());
          APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
          Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
              DL, Offset, /* AllowNonInbounds */ true));
          auto *GV = dyn_cast<GlobalVariable>(Ptr);
          if (!GV) {
            LLVM_DEBUG(dbgs() << "Memset with unknown base.\n");
            return false;
          }

          Constant *Val = getVal(MSI->getValue());
          // Avoid the byte-per-byte scan if we're memsetting a zeroinitializer
          // to zero.
          if (!Val->isNullValue() || MutatedMemory.contains(GV) ||
              !GV->hasDefinitiveInitializer() ||
              !GV->getInitializer()->isNullValue()) {
            APInt Len = LenC->getValue();
            if (Len.ugt(64 * 1024)) {
              LLVM_DEBUG(dbgs() << "Not evaluating large memset of size "
                                << Len << "\n");
              return false;
            }

            while (Len != 0) {
              Constant *DestVal = ComputeLoadResult(GV, Val->getType(), Offset);
              if (DestVal != Val) {
                LLVM_DEBUG(dbgs() << "Memset is not a no-op at offset "
                                  << Offset << " of " << *GV << ".\n");
                return false;
              }
              ++Offset;
              --Len;
            }
          }

          LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n");
          ++CurInst;
          continue;
        }

        if (II->isLifetimeStartOrEnd()) {
          LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          if (!II->use_empty()) {
            LLVM_DEBUG(dbgs()
                       << "Found unused invariant_start. Can't evaluate.\n");
            return false;
          }
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            Type *ElemTy = GV->getValueType();
            if (!Size->isMinusOne() &&
                Size->getValue().getLimitedValue() >=
                    DL.getTypeStoreSize(ElemTy)) {
              Invariants.insert(GV);
              LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: "
                                << *GV << "\n");
            } else {
              LLVM_DEBUG(dbgs()
                         << "Found a global var, but can not treat it as an "
                            "invariant.\n");
            }
          }
          // Continue even if we do nothing.
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::assume) {
          LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::sideeffect) {
          LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::pseudoprobe) {
          LLVM_DEBUG(dbgs() << "Skipping pseudoprobe intrinsic.\n");
          ++CurInst;
          continue;
        } else {
          Value *Stripped = CurInst->stripPointerCastsForAliasAnalysis();
          // Only attempt to getVal() if we've actually managed to strip
          // anything away, or else we'll call getVal() on the current
          // instruction.
          if (Stripped != &*CurInst) {
            InstResult = getVal(Stripped);
          }
          if (InstResult) {
            LLVM_DEBUG(dbgs()
                       << "Stripped pointer casts for alias analysis for "
                          "intrinsic call.\n");
            StrippedPointerCastsForAliasAnalysis = true;
            InstResult = ConstantExpr::getBitCast(InstResult, II->getType());
          } else {
            LLVM_DEBUG(dbgs() << "Unknown intrinsic. Cannot evaluate.\n");
            return false;
          }
        }
      }

      if (!InstResult) {
        // Resolve function pointers.
        SmallVector<Constant *, 8> Formals;
        Function *Callee = getCalleeWithFormalArgs(CB, Formals);
        if (!Callee || Callee->isInterposable()) {
          LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n");
          return false; // Cannot resolve.
        }

        if (Callee->isDeclaration()) {
          // If this is a function we can constant fold, do it.
          if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
            InstResult = castCallResultIfNeeded(CB.getType(), C);
            if (!InstResult)
              return false;
            LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
                              << *InstResult << "\n");
          } else {
            LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n");
            return false;
          }
        } else {
          if (Callee->getFunctionType()->isVarArg()) {
            LLVM_DEBUG(dbgs()
                       << "Can not constant fold vararg function call.\n");
            return false;
          }

          Constant *RetVal = nullptr;
          // Execute the call, if successful, use the return value.
          ValueStack.emplace_back();
          if (!EvaluateFunction(Callee, RetVal, Formals)) {
            LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n");
            return false;
          }
          ValueStack.pop_back();
          InstResult = castCallResultIfNeeded(CB.getType(), RetVal);
          if (RetVal && !InstResult)
            return false;

          if (InstResult) {
            LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: "
                              << *InstResult << "\n\n");
          } else {
            LLVM_DEBUG(dbgs()
                       << "Successfully evaluated function. Result: 0\n\n");
          }
        }
      }
    } else if (CurInst->isTerminator()) {
      LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n");

      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
              dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false; // Cannot determine.

          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
            dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false; // Cannot determine.
        NextBB = SI->findCaseValue(Val)->getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false; // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        NextBB = nullptr;
      } else {
        // invoke, unwind, resume, unreachable.
        LLVM_DEBUG(dbgs() << "Can not handle terminator.");
        return false; // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n");
      return true;
    } else {
      SmallVector<Constant *, 8> Ops;
      for (Value *Op : CurInst->operands())
        Ops.push_back(getVal(Op));
      InstResult = ConstantFoldInstOperands(&*CurInst, Ops, DL, TLI);
      if (!InstResult) {
        LLVM_DEBUG(dbgs() << "Cannot fold instruction: " << *CurInst << "\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Folded instruction " << *CurInst << " to "
                        << *InstResult << "\n");
    }

    if (!CurInst->use_empty()) {
      InstResult = ConstantFoldConstant(InstResult, DL, TLI);
      setVal(&*CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}

/// Evaluate a call to function F, returning true if successful, false if we
/// can't evaluate it. ActualArgs contains the actual arguments to pass to the
/// function.
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant *> &ActualArgs) {
  assert(ActualArgs.size() == F->arg_size() && "wrong number of arguments");

  // Check to see if this function is already executing (recursion). If so,
  // bail out. TODO: we might want to accept limited recursion.
  if (is_contained(CallStack, F))
    return false;

  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  for (const auto &[ArgNo, Arg] : llvm::enumerate(F->args()))
    setVal(&Arg, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
  // we can only evaluate any one basic block at most once. This set keeps
  // track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock *, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = &F->front();

  BasicBlock::iterator CurInst = CurBB->begin();

  while (true) {
    BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
    LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");

    bool StrippedPointerCastsForAliasAnalysis = false;

    if (!EvaluateBlock(CurInst, NextBB, StrippedPointerCastsForAliasAnalysis))
      return false;

    if (!NextBB) {
      // Successfully running until there's no next block means that we found
      // the return. Fill in the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands()) {
        // The Evaluator can look through pointer casts as long as alias
        // analysis holds because it's just a simple interpreter and doesn't
        // skip memory accesses due to invariant group metadata, but we can't
        // let users of Evaluator use a value that's been gleaned looking
        // through stripping pointer casts.
        if (StrippedPointerCastsForAliasAnalysis &&
            !RI->getReturnValue()->getType()->isVoidTy()) {
          return false;
        }
        RetVal = getVal(RI->getOperand(0));
      }
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow. See if we have
    // executed the new block before. If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB).second)
      return false; // looped!

    // Okay, we have never been in this block before. Check to see if there
    // are any PHI nodes. If so, evaluate them with information about where
    // we came from.
    PHINode *PN = nullptr;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}
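
// Example usage (a minimal sketch, not part of this file): a caller such as
// GlobalOpt's static-constructor evaluation would drive this class roughly as
// below, assuming the Evaluator(DL, TLI) constructor declared in
// llvm/Transforms/Utils/Evaluator.h. `Ctor` stands for some hypothetical
// zero-argument function to be evaluated.
//
//   Evaluator Eval(DL, TLI);
//   Constant *RetVal = nullptr;
//   SmallVector<Constant *, 0> Args;
//   if (Eval.EvaluateFunction(Ctor, RetVal, Args)) {
//     // Success: the caller may now commit the mutated global initializers
//     // tracked by the Evaluator back into the module.
//   }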