File: lib/Transforms/Utils/Local.cpp
Static-analyzer warning: line 146, column 7 — "Called C++ object pointer is null"
//===- Local.cpp - Functions to perform local transformations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

14 | #include "llvm/Transforms/Utils/Local.h" | |||
15 | #include "llvm/ADT/APInt.h" | |||
16 | #include "llvm/ADT/DenseMap.h" | |||
17 | #include "llvm/ADT/DenseMapInfo.h" | |||
18 | #include "llvm/ADT/DenseSet.h" | |||
19 | #include "llvm/ADT/Hashing.h" | |||
20 | #include "llvm/ADT/None.h" | |||
21 | #include "llvm/ADT/Optional.h" | |||
22 | #include "llvm/ADT/STLExtras.h" | |||
23 | #include "llvm/ADT/SetVector.h" | |||
24 | #include "llvm/ADT/SmallPtrSet.h" | |||
25 | #include "llvm/ADT/SmallVector.h" | |||
26 | #include "llvm/ADT/Statistic.h" | |||
27 | #include "llvm/ADT/TinyPtrVector.h" | |||
28 | #include "llvm/Analysis/ConstantFolding.h" | |||
29 | #include "llvm/Analysis/DomTreeUpdater.h" | |||
30 | #include "llvm/Analysis/EHPersonalities.h" | |||
31 | #include "llvm/Analysis/InstructionSimplify.h" | |||
32 | #include "llvm/Analysis/LazyValueInfo.h" | |||
33 | #include "llvm/Analysis/MemoryBuiltins.h" | |||
34 | #include "llvm/Analysis/MemorySSAUpdater.h" | |||
35 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
36 | #include "llvm/Analysis/ValueTracking.h" | |||
37 | #include "llvm/Analysis/VectorUtils.h" | |||
38 | #include "llvm/BinaryFormat/Dwarf.h" | |||
39 | #include "llvm/IR/Argument.h" | |||
40 | #include "llvm/IR/Attributes.h" | |||
41 | #include "llvm/IR/BasicBlock.h" | |||
42 | #include "llvm/IR/CFG.h" | |||
43 | #include "llvm/IR/CallSite.h" | |||
44 | #include "llvm/IR/Constant.h" | |||
45 | #include "llvm/IR/ConstantRange.h" | |||
46 | #include "llvm/IR/Constants.h" | |||
47 | #include "llvm/IR/DIBuilder.h" | |||
48 | #include "llvm/IR/DataLayout.h" | |||
49 | #include "llvm/IR/DebugInfoMetadata.h" | |||
50 | #include "llvm/IR/DebugLoc.h" | |||
51 | #include "llvm/IR/DerivedTypes.h" | |||
52 | #include "llvm/IR/Dominators.h" | |||
53 | #include "llvm/IR/Function.h" | |||
54 | #include "llvm/IR/GetElementPtrTypeIterator.h" | |||
55 | #include "llvm/IR/GlobalObject.h" | |||
56 | #include "llvm/IR/IRBuilder.h" | |||
57 | #include "llvm/IR/InstrTypes.h" | |||
58 | #include "llvm/IR/Instruction.h" | |||
59 | #include "llvm/IR/Instructions.h" | |||
60 | #include "llvm/IR/IntrinsicInst.h" | |||
61 | #include "llvm/IR/Intrinsics.h" | |||
62 | #include "llvm/IR/LLVMContext.h" | |||
63 | #include "llvm/IR/MDBuilder.h" | |||
64 | #include "llvm/IR/Metadata.h" | |||
65 | #include "llvm/IR/Module.h" | |||
66 | #include "llvm/IR/Operator.h" | |||
67 | #include "llvm/IR/PatternMatch.h" | |||
68 | #include "llvm/IR/Type.h" | |||
69 | #include "llvm/IR/Use.h" | |||
70 | #include "llvm/IR/User.h" | |||
71 | #include "llvm/IR/Value.h" | |||
72 | #include "llvm/IR/ValueHandle.h" | |||
73 | #include "llvm/Support/Casting.h" | |||
74 | #include "llvm/Support/Debug.h" | |||
75 | #include "llvm/Support/ErrorHandling.h" | |||
76 | #include "llvm/Support/KnownBits.h" | |||
77 | #include "llvm/Support/raw_ostream.h" | |||
78 | #include "llvm/Transforms/Utils/ValueMapper.h" | |||
79 | #include <algorithm> | |||
80 | #include <cassert> | |||
81 | #include <climits> | |||
82 | #include <cstdint> | |||
83 | #include <iterator> | |||
84 | #include <map> | |||
85 | #include <utility> | |||
86 | ||||
87 | using namespace llvm; | |||
88 | using namespace llvm::PatternMatch; | |||
89 | ||||
90 | #define DEBUG_TYPE"local" "local" | |||
91 | ||||
92 | STATISTIC(NumRemoved, "Number of unreachable basic blocks removed")static llvm::Statistic NumRemoved = {"local", "NumRemoved", "Number of unreachable basic blocks removed" , {0}, {false}}; | |||
93 | ||||
94 | // Max recursion depth for collectBitParts used when detecting bswap and | |||
95 | // bitreverse idioms | |||
96 | static const unsigned BitPartRecursionMaxDepth = 64; | |||
97 | ||||
//===----------------------------------------------------------------------===//
// Local constant propagation.
//

102 | /// ConstantFoldTerminator - If a terminator instruction is predicated on a | |||
103 | /// constant value, convert it into an unconditional branch to the constant | |||
104 | /// destination. This is a nontrivial operation because the successors of this | |||
105 | /// basic block must have their PHI nodes updated. | |||
106 | /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch | |||
107 | /// conditions and indirectbr addresses this might make dead if | |||
108 | /// DeleteDeadConditions is true. | |||
109 | bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions, | |||
110 | const TargetLibraryInfo *TLI, | |||
111 | DomTreeUpdater *DTU) { | |||
112 | Instruction *T = BB->getTerminator(); | |||
113 | IRBuilder<> Builder(T); | |||
114 | ||||
115 | // Branch - See if we are conditional jumping on constant | |||
116 | if (auto *BI = dyn_cast<BranchInst>(T)) { | |||
117 | if (BI->isUnconditional()) return false; // Can't optimize uncond branch | |||
118 | BasicBlock *Dest1 = BI->getSuccessor(0); | |||
119 | BasicBlock *Dest2 = BI->getSuccessor(1); | |||
120 | ||||
121 | if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) { | |||
122 | // Are we branching on constant? | |||
123 | // YES. Change to unconditional branch... | |||
124 | BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2; | |||
125 | BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1; | |||
126 | ||||
127 | // Let the basic block know that we are letting go of it. Based on this, | |||
128 | // it will adjust it's PHI nodes. | |||
129 | OldDest->removePredecessor(BB); | |||
130 | ||||
131 | // Replace the conditional branch with an unconditional one. | |||
132 | Builder.CreateBr(Destination); | |||
133 | BI->eraseFromParent(); | |||
134 | if (DTU) | |||
135 | DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, OldDest}}); | |||
136 | return true; | |||
137 | } | |||
138 | ||||
139 | if (Dest2 == Dest1) { // Conditional branch to same location? | |||
140 | // This branch matches something like this: | |||
141 | // br bool %cond, label %Dest, label %Dest | |||
142 | // and changes it into: br label %Dest | |||
143 | ||||
144 | // Let the basic block know that we are letting go of one copy of it. | |||
145 | assert(BI->getParent() && "Terminator not inserted in block!")((BI->getParent() && "Terminator not inserted in block!" ) ? static_cast<void> (0) : __assert_fail ("BI->getParent() && \"Terminator not inserted in block!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 145, __PRETTY_FUNCTION__)); | |||
146 | Dest1->removePredecessor(BI->getParent()); | |||
| ||||
147 | ||||
148 | // Replace the conditional branch with an unconditional one. | |||
149 | Builder.CreateBr(Dest1); | |||
150 | Value *Cond = BI->getCondition(); | |||
151 | BI->eraseFromParent(); | |||
152 | if (DeleteDeadConditions) | |||
153 | RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); | |||
154 | return true; | |||
155 | } | |||
156 | return false; | |||
157 | } | |||
158 | ||||
159 | if (auto *SI = dyn_cast<SwitchInst>(T)) { | |||
160 | // If we are switching on a constant, we can convert the switch to an | |||
161 | // unconditional branch. | |||
162 | auto *CI = dyn_cast<ConstantInt>(SI->getCondition()); | |||
163 | BasicBlock *DefaultDest = SI->getDefaultDest(); | |||
164 | BasicBlock *TheOnlyDest = DefaultDest; | |||
165 | ||||
166 | // If the default is unreachable, ignore it when searching for TheOnlyDest. | |||
167 | if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) && | |||
168 | SI->getNumCases() > 0) { | |||
169 | TheOnlyDest = SI->case_begin()->getCaseSuccessor(); | |||
170 | } | |||
171 | ||||
172 | // Figure out which case it goes to. | |||
173 | for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) { | |||
174 | // Found case matching a constant operand? | |||
175 | if (i->getCaseValue() == CI) { | |||
176 | TheOnlyDest = i->getCaseSuccessor(); | |||
177 | break; | |||
178 | } | |||
179 | ||||
180 | // Check to see if this branch is going to the same place as the default | |||
181 | // dest. If so, eliminate it as an explicit compare. | |||
182 | if (i->getCaseSuccessor() == DefaultDest) { | |||
183 | MDNode *MD = SI->getMetadata(LLVMContext::MD_prof); | |||
184 | unsigned NCases = SI->getNumCases(); | |||
185 | // Fold the case metadata into the default if there will be any branches | |||
186 | // left, unless the metadata doesn't match the switch. | |||
187 | if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) { | |||
188 | // Collect branch weights into a vector. | |||
189 | SmallVector<uint32_t, 8> Weights; | |||
190 | for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e; | |||
191 | ++MD_i) { | |||
192 | auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i)); | |||
193 | Weights.push_back(CI->getValue().getZExtValue()); | |||
194 | } | |||
195 | // Merge weight of this case to the default weight. | |||
196 | unsigned idx = i->getCaseIndex(); | |||
197 | Weights[0] += Weights[idx+1]; | |||
198 | // Remove weight for this case. | |||
199 | std::swap(Weights[idx+1], Weights.back()); | |||
200 | Weights.pop_back(); | |||
201 | SI->setMetadata(LLVMContext::MD_prof, | |||
202 | MDBuilder(BB->getContext()). | |||
203 | createBranchWeights(Weights)); | |||
204 | } | |||
205 | // Remove this entry. | |||
206 | BasicBlock *ParentBB = SI->getParent(); | |||
207 | DefaultDest->removePredecessor(ParentBB); | |||
208 | i = SI->removeCase(i); | |||
209 | e = SI->case_end(); | |||
210 | if (DTU) | |||
211 | DTU->applyUpdatesPermissive( | |||
212 | {{DominatorTree::Delete, ParentBB, DefaultDest}}); | |||
213 | continue; | |||
214 | } | |||
215 | ||||
216 | // Otherwise, check to see if the switch only branches to one destination. | |||
217 | // We do this by reseting "TheOnlyDest" to null when we find two non-equal | |||
218 | // destinations. | |||
219 | if (i->getCaseSuccessor() != TheOnlyDest) | |||
220 | TheOnlyDest = nullptr; | |||
221 | ||||
222 | // Increment this iterator as we haven't removed the case. | |||
223 | ++i; | |||
224 | } | |||
225 | ||||
226 | if (CI && !TheOnlyDest) { | |||
227 | // Branching on a constant, but not any of the cases, go to the default | |||
228 | // successor. | |||
229 | TheOnlyDest = SI->getDefaultDest(); | |||
230 | } | |||
231 | ||||
232 | // If we found a single destination that we can fold the switch into, do so | |||
233 | // now. | |||
234 | if (TheOnlyDest) { | |||
235 | // Insert the new branch. | |||
236 | Builder.CreateBr(TheOnlyDest); | |||
237 | BasicBlock *BB = SI->getParent(); | |||
238 | std::vector <DominatorTree::UpdateType> Updates; | |||
239 | if (DTU) | |||
240 | Updates.reserve(SI->getNumSuccessors() - 1); | |||
241 | ||||
242 | // Remove entries from PHI nodes which we no longer branch to... | |||
243 | for (BasicBlock *Succ : successors(SI)) { | |||
244 | // Found case matching a constant operand? | |||
245 | if (Succ == TheOnlyDest) { | |||
246 | TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest | |||
247 | } else { | |||
248 | Succ->removePredecessor(BB); | |||
249 | if (DTU) | |||
250 | Updates.push_back({DominatorTree::Delete, BB, Succ}); | |||
251 | } | |||
252 | } | |||
253 | ||||
254 | // Delete the old switch. | |||
255 | Value *Cond = SI->getCondition(); | |||
256 | SI->eraseFromParent(); | |||
257 | if (DeleteDeadConditions) | |||
258 | RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); | |||
259 | if (DTU) | |||
260 | DTU->applyUpdatesPermissive(Updates); | |||
261 | return true; | |||
262 | } | |||
263 | ||||
264 | if (SI->getNumCases() == 1) { | |||
265 | // Otherwise, we can fold this switch into a conditional branch | |||
266 | // instruction if it has only one non-default destination. | |||
267 | auto FirstCase = *SI->case_begin(); | |||
268 | Value *Cond = Builder.CreateICmpEQ(SI->getCondition(), | |||
269 | FirstCase.getCaseValue(), "cond"); | |||
270 | ||||
271 | // Insert the new branch. | |||
272 | BranchInst *NewBr = Builder.CreateCondBr(Cond, | |||
273 | FirstCase.getCaseSuccessor(), | |||
274 | SI->getDefaultDest()); | |||
275 | MDNode *MD = SI->getMetadata(LLVMContext::MD_prof); | |||
276 | if (MD && MD->getNumOperands() == 3) { | |||
277 | ConstantInt *SICase = | |||
278 | mdconst::dyn_extract<ConstantInt>(MD->getOperand(2)); | |||
279 | ConstantInt *SIDef = | |||
280 | mdconst::dyn_extract<ConstantInt>(MD->getOperand(1)); | |||
281 | assert(SICase && SIDef)((SICase && SIDef) ? static_cast<void> (0) : __assert_fail ("SICase && SIDef", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 281, __PRETTY_FUNCTION__)); | |||
282 | // The TrueWeight should be the weight for the single case of SI. | |||
283 | NewBr->setMetadata(LLVMContext::MD_prof, | |||
284 | MDBuilder(BB->getContext()). | |||
285 | createBranchWeights(SICase->getValue().getZExtValue(), | |||
286 | SIDef->getValue().getZExtValue())); | |||
287 | } | |||
288 | ||||
289 | // Update make.implicit metadata to the newly-created conditional branch. | |||
290 | MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit); | |||
291 | if (MakeImplicitMD) | |||
292 | NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD); | |||
293 | ||||
294 | // Delete the old switch. | |||
295 | SI->eraseFromParent(); | |||
296 | return true; | |||
297 | } | |||
298 | return false; | |||
299 | } | |||
300 | ||||
301 | if (auto *IBI = dyn_cast<IndirectBrInst>(T)) { | |||
302 | // indirectbr blockaddress(@F, @BB) -> br label @BB | |||
303 | if (auto *BA = | |||
304 | dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) { | |||
305 | BasicBlock *TheOnlyDest = BA->getBasicBlock(); | |||
306 | std::vector <DominatorTree::UpdateType> Updates; | |||
307 | if (DTU) | |||
308 | Updates.reserve(IBI->getNumDestinations() - 1); | |||
309 | ||||
310 | // Insert the new branch. | |||
311 | Builder.CreateBr(TheOnlyDest); | |||
312 | ||||
313 | for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) { | |||
314 | if (IBI->getDestination(i) == TheOnlyDest) { | |||
315 | TheOnlyDest = nullptr; | |||
316 | } else { | |||
317 | BasicBlock *ParentBB = IBI->getParent(); | |||
318 | BasicBlock *DestBB = IBI->getDestination(i); | |||
319 | DestBB->removePredecessor(ParentBB); | |||
320 | if (DTU) | |||
321 | Updates.push_back({DominatorTree::Delete, ParentBB, DestBB}); | |||
322 | } | |||
323 | } | |||
324 | Value *Address = IBI->getAddress(); | |||
325 | IBI->eraseFromParent(); | |||
326 | if (DeleteDeadConditions) | |||
327 | RecursivelyDeleteTriviallyDeadInstructions(Address, TLI); | |||
328 | ||||
329 | // If we didn't find our destination in the IBI successor list, then we | |||
330 | // have undefined behavior. Replace the unconditional branch with an | |||
331 | // 'unreachable' instruction. | |||
332 | if (TheOnlyDest) { | |||
333 | BB->getTerminator()->eraseFromParent(); | |||
334 | new UnreachableInst(BB->getContext(), BB); | |||
335 | } | |||
336 | ||||
337 | if (DTU) | |||
338 | DTU->applyUpdatesPermissive(Updates); | |||
339 | return true; | |||
340 | } | |||
341 | } | |||
342 | ||||
343 | return false; | |||
344 | } | |||
345 | ||||
//===----------------------------------------------------------------------===//
// Local dead code elimination.
//

350 | /// isInstructionTriviallyDead - Return true if the result produced by the | |||
351 | /// instruction is not used, and the instruction has no side effects. | |||
352 | /// | |||
353 | bool llvm::isInstructionTriviallyDead(Instruction *I, | |||
354 | const TargetLibraryInfo *TLI) { | |||
355 | if (!I->use_empty()) | |||
356 | return false; | |||
357 | return wouldInstructionBeTriviallyDead(I, TLI); | |||
358 | } | |||
359 | ||||
360 | bool llvm::wouldInstructionBeTriviallyDead(Instruction *I, | |||
361 | const TargetLibraryInfo *TLI) { | |||
362 | if (I->isTerminator()) | |||
363 | return false; | |||
364 | ||||
365 | // We don't want the landingpad-like instructions removed by anything this | |||
366 | // general. | |||
367 | if (I->isEHPad()) | |||
368 | return false; | |||
369 | ||||
370 | // We don't want debug info removed by anything this general, unless | |||
371 | // debug info is empty. | |||
372 | if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) { | |||
373 | if (DDI->getAddress()) | |||
374 | return false; | |||
375 | return true; | |||
376 | } | |||
377 | if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) { | |||
378 | if (DVI->getValue()) | |||
379 | return false; | |||
380 | return true; | |||
381 | } | |||
382 | if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) { | |||
383 | if (DLI->getLabel()) | |||
384 | return false; | |||
385 | return true; | |||
386 | } | |||
387 | ||||
388 | if (!I->mayHaveSideEffects()) | |||
389 | return true; | |||
390 | ||||
391 | // Special case intrinsics that "may have side effects" but can be deleted | |||
392 | // when dead. | |||
393 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | |||
394 | // Safe to delete llvm.stacksave and launder.invariant.group if dead. | |||
395 | if (II->getIntrinsicID() == Intrinsic::stacksave || | |||
396 | II->getIntrinsicID() == Intrinsic::launder_invariant_group) | |||
397 | return true; | |||
398 | ||||
399 | // Lifetime intrinsics are dead when their right-hand is undef. | |||
400 | if (II->isLifetimeStartOrEnd()) | |||
401 | return isa<UndefValue>(II->getArgOperand(1)); | |||
402 | ||||
403 | // Assumptions are dead if their condition is trivially true. Guards on | |||
404 | // true are operationally no-ops. In the future we can consider more | |||
405 | // sophisticated tradeoffs for guards considering potential for check | |||
406 | // widening, but for now we keep things simple. | |||
407 | if (II->getIntrinsicID() == Intrinsic::assume || | |||
408 | II->getIntrinsicID() == Intrinsic::experimental_guard) { | |||
409 | if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0))) | |||
410 | return !Cond->isZero(); | |||
411 | ||||
412 | return false; | |||
413 | } | |||
414 | } | |||
415 | ||||
416 | if (isAllocLikeFn(I, TLI)) | |||
417 | return true; | |||
418 | ||||
419 | if (CallInst *CI = isFreeCall(I, TLI)) | |||
420 | if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0))) | |||
421 | return C->isNullValue() || isa<UndefValue>(C); | |||
422 | ||||
423 | if (auto *Call = dyn_cast<CallBase>(I)) | |||
424 | if (isMathLibCallNoop(Call, TLI)) | |||
425 | return true; | |||
426 | ||||
427 | return false; | |||
428 | } | |||
429 | ||||
430 | /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a | |||
431 | /// trivially dead instruction, delete it. If that makes any of its operands | |||
432 | /// trivially dead, delete them too, recursively. Return true if any | |||
433 | /// instructions were deleted. | |||
434 | bool llvm::RecursivelyDeleteTriviallyDeadInstructions( | |||
435 | Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU) { | |||
436 | Instruction *I = dyn_cast<Instruction>(V); | |||
437 | if (!I || !isInstructionTriviallyDead(I, TLI)) | |||
438 | return false; | |||
439 | ||||
440 | SmallVector<Instruction*, 16> DeadInsts; | |||
441 | DeadInsts.push_back(I); | |||
442 | RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU); | |||
443 | ||||
444 | return true; | |||
445 | } | |||
446 | ||||
447 | void llvm::RecursivelyDeleteTriviallyDeadInstructions( | |||
448 | SmallVectorImpl<Instruction *> &DeadInsts, const TargetLibraryInfo *TLI, | |||
449 | MemorySSAUpdater *MSSAU) { | |||
450 | // Process the dead instruction list until empty. | |||
451 | while (!DeadInsts.empty()) { | |||
452 | Instruction &I = *DeadInsts.pop_back_val(); | |||
453 | assert(I.use_empty() && "Instructions with uses are not dead.")((I.use_empty() && "Instructions with uses are not dead." ) ? static_cast<void> (0) : __assert_fail ("I.use_empty() && \"Instructions with uses are not dead.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 453, __PRETTY_FUNCTION__)); | |||
454 | assert(isInstructionTriviallyDead(&I, TLI) &&((isInstructionTriviallyDead(&I, TLI) && "Live instruction found in dead worklist!" ) ? static_cast<void> (0) : __assert_fail ("isInstructionTriviallyDead(&I, TLI) && \"Live instruction found in dead worklist!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 455, __PRETTY_FUNCTION__)) | |||
455 | "Live instruction found in dead worklist!")((isInstructionTriviallyDead(&I, TLI) && "Live instruction found in dead worklist!" ) ? static_cast<void> (0) : __assert_fail ("isInstructionTriviallyDead(&I, TLI) && \"Live instruction found in dead worklist!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 455, __PRETTY_FUNCTION__)); | |||
456 | ||||
457 | // Don't lose the debug info while deleting the instructions. | |||
458 | salvageDebugInfo(I); | |||
459 | ||||
460 | // Null out all of the instruction's operands to see if any operand becomes | |||
461 | // dead as we go. | |||
462 | for (Use &OpU : I.operands()) { | |||
463 | Value *OpV = OpU.get(); | |||
464 | OpU.set(nullptr); | |||
465 | ||||
466 | if (!OpV->use_empty()) | |||
467 | continue; | |||
468 | ||||
469 | // If the operand is an instruction that became dead as we nulled out the | |||
470 | // operand, and if it is 'trivially' dead, delete it in a future loop | |||
471 | // iteration. | |||
472 | if (Instruction *OpI = dyn_cast<Instruction>(OpV)) | |||
473 | if (isInstructionTriviallyDead(OpI, TLI)) | |||
474 | DeadInsts.push_back(OpI); | |||
475 | } | |||
476 | if (MSSAU) | |||
477 | MSSAU->removeMemoryAccess(&I); | |||
478 | ||||
479 | I.eraseFromParent(); | |||
480 | } | |||
481 | } | |||
482 | ||||
483 | bool llvm::replaceDbgUsesWithUndef(Instruction *I) { | |||
484 | SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; | |||
485 | findDbgUsers(DbgUsers, I); | |||
486 | for (auto *DII : DbgUsers) { | |||
487 | Value *Undef = UndefValue::get(I->getType()); | |||
488 | DII->setOperand(0, MetadataAsValue::get(DII->getContext(), | |||
489 | ValueAsMetadata::get(Undef))); | |||
490 | } | |||
491 | return !DbgUsers.empty(); | |||
492 | } | |||
493 | ||||
494 | /// areAllUsesEqual - Check whether the uses of a value are all the same. | |||
495 | /// This is similar to Instruction::hasOneUse() except this will also return | |||
496 | /// true when there are no uses or multiple uses that all refer to the same | |||
497 | /// value. | |||
498 | static bool areAllUsesEqual(Instruction *I) { | |||
499 | Value::user_iterator UI = I->user_begin(); | |||
500 | Value::user_iterator UE = I->user_end(); | |||
501 | if (UI == UE) | |||
502 | return true; | |||
503 | ||||
504 | User *TheUse = *UI; | |||
505 | for (++UI; UI != UE; ++UI) { | |||
506 | if (*UI != TheUse) | |||
507 | return false; | |||
508 | } | |||
509 | return true; | |||
510 | } | |||
511 | ||||
512 | /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively | |||
513 | /// dead PHI node, due to being a def-use chain of single-use nodes that | |||
514 | /// either forms a cycle or is terminated by a trivially dead instruction, | |||
515 | /// delete it. If that makes any of its operands trivially dead, delete them | |||
516 | /// too, recursively. Return true if a change was made. | |||
517 | bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN, | |||
518 | const TargetLibraryInfo *TLI) { | |||
519 | SmallPtrSet<Instruction*, 4> Visited; | |||
520 | for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects(); | |||
521 | I = cast<Instruction>(*I->user_begin())) { | |||
522 | if (I->use_empty()) | |||
523 | return RecursivelyDeleteTriviallyDeadInstructions(I, TLI); | |||
524 | ||||
525 | // If we find an instruction more than once, we're on a cycle that | |||
526 | // won't prove fruitful. | |||
527 | if (!Visited.insert(I).second) { | |||
528 | // Break the cycle and delete the instruction and its operands. | |||
529 | I->replaceAllUsesWith(UndefValue::get(I->getType())); | |||
530 | (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI); | |||
531 | return true; | |||
532 | } | |||
533 | } | |||
534 | return false; | |||
535 | } | |||
536 | ||||
537 | static bool | |||
538 | simplifyAndDCEInstruction(Instruction *I, | |||
539 | SmallSetVector<Instruction *, 16> &WorkList, | |||
540 | const DataLayout &DL, | |||
541 | const TargetLibraryInfo *TLI) { | |||
542 | if (isInstructionTriviallyDead(I, TLI)) { | |||
543 | salvageDebugInfo(*I); | |||
544 | ||||
545 | // Null out all of the instruction's operands to see if any operand becomes | |||
546 | // dead as we go. | |||
547 | for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { | |||
548 | Value *OpV = I->getOperand(i); | |||
549 | I->setOperand(i, nullptr); | |||
550 | ||||
551 | if (!OpV->use_empty() || I == OpV) | |||
552 | continue; | |||
553 | ||||
554 | // If the operand is an instruction that became dead as we nulled out the | |||
555 | // operand, and if it is 'trivially' dead, delete it in a future loop | |||
556 | // iteration. | |||
557 | if (Instruction *OpI = dyn_cast<Instruction>(OpV)) | |||
558 | if (isInstructionTriviallyDead(OpI, TLI)) | |||
559 | WorkList.insert(OpI); | |||
560 | } | |||
561 | ||||
562 | I->eraseFromParent(); | |||
563 | ||||
564 | return true; | |||
565 | } | |||
566 | ||||
567 | if (Value *SimpleV = SimplifyInstruction(I, DL)) { | |||
568 | // Add the users to the worklist. CAREFUL: an instruction can use itself, | |||
569 | // in the case of a phi node. | |||
570 | for (User *U : I->users()) { | |||
571 | if (U != I) { | |||
572 | WorkList.insert(cast<Instruction>(U)); | |||
573 | } | |||
574 | } | |||
575 | ||||
576 | // Replace the instruction with its simplified value. | |||
577 | bool Changed = false; | |||
578 | if (!I->use_empty()) { | |||
579 | I->replaceAllUsesWith(SimpleV); | |||
580 | Changed = true; | |||
581 | } | |||
582 | if (isInstructionTriviallyDead(I, TLI)) { | |||
583 | I->eraseFromParent(); | |||
584 | Changed = true; | |||
585 | } | |||
586 | return Changed; | |||
587 | } | |||
588 | return false; | |||
589 | } | |||
590 | ||||
591 | /// SimplifyInstructionsInBlock - Scan the specified basic block and try to | |||
592 | /// simplify any instructions in it and recursively delete dead instructions. | |||
593 | /// | |||
594 | /// This returns true if it changed the code, note that it can delete | |||
595 | /// instructions in other blocks as well in this block. | |||
596 | bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, | |||
597 | const TargetLibraryInfo *TLI) { | |||
598 | bool MadeChange = false; | |||
599 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
600 | ||||
601 | #ifndef NDEBUG | |||
602 | // In debug builds, ensure that the terminator of the block is never replaced | |||
603 | // or deleted by these simplifications. The idea of simplification is that it | |||
604 | // cannot introduce new instructions, and there is no way to replace the | |||
605 | // terminator of a block without introducing a new instruction. | |||
606 | AssertingVH<Instruction> TerminatorVH(&BB->back()); | |||
607 | #endif | |||
608 | ||||
609 | SmallSetVector<Instruction *, 16> WorkList; | |||
610 | // Iterate over the original function, only adding insts to the worklist | |||
611 | // if they actually need to be revisited. This avoids having to pre-init | |||
612 | // the worklist with the entire function's worth of instructions. | |||
613 | for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end()); | |||
614 | BI != E;) { | |||
615 | assert(!BI->isTerminator())((!BI->isTerminator()) ? static_cast<void> (0) : __assert_fail ("!BI->isTerminator()", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 615, __PRETTY_FUNCTION__)); | |||
616 | Instruction *I = &*BI; | |||
617 | ++BI; | |||
618 | ||||
619 | // We're visiting this instruction now, so make sure it's not in the | |||
620 | // worklist from an earlier visit. | |||
621 | if (!WorkList.count(I)) | |||
622 | MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); | |||
623 | } | |||
624 | ||||
625 | while (!WorkList.empty()) { | |||
626 | Instruction *I = WorkList.pop_back_val(); | |||
627 | MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); | |||
628 | } | |||
629 | return MadeChange; | |||
630 | } | |||
631 | ||||
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
//

636 | /// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this | |||
637 | /// method is called when we're about to delete Pred as a predecessor of BB. If | |||
638 | /// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred. | |||
639 | /// | |||
640 | /// Unlike the removePredecessor method, this attempts to simplify uses of PHI | |||
641 | /// nodes that collapse into identity values. For example, if we have: | |||
642 | /// x = phi(1, 0, 0, 0) | |||
643 | /// y = and x, z | |||
644 | /// | |||
645 | /// .. and delete the predecessor corresponding to the '1', this will attempt to | |||
646 | /// recursively fold the and to 0. | |||
647 | void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred, | |||
648 | DomTreeUpdater *DTU) { | |||
649 | // This only adjusts blocks with PHI nodes. | |||
650 | if (!isa<PHINode>(BB->begin())) | |||
651 | return; | |||
652 | ||||
653 | // Remove the entries for Pred from the PHI nodes in BB, but do not simplify | |||
654 | // them down. This will leave us with single entry phi nodes and other phis | |||
655 | // that can be removed. | |||
656 | BB->removePredecessor(Pred, true); | |||
657 | ||||
658 | WeakTrackingVH PhiIt = &BB->front(); | |||
659 | while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) { | |||
660 | PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt)); | |||
661 | Value *OldPhiIt = PhiIt; | |||
662 | ||||
663 | if (!recursivelySimplifyInstruction(PN)) | |||
664 | continue; | |||
665 | ||||
666 | // If recursive simplification ended up deleting the next PHI node we would | |||
667 | // iterate to, then our iterator is invalid, restart scanning from the top | |||
668 | // of the block. | |||
669 | if (PhiIt != OldPhiIt) PhiIt = &BB->front(); | |||
670 | } | |||
671 | if (DTU) | |||
672 | DTU->applyUpdatesPermissive({{DominatorTree::Delete, Pred, BB}}); | |||
673 | } | |||
674 | ||||
/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!). Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with undef, it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Remember whether we are deleting the function's entry block; DestBB must
  // then be moved into the entry position and the DomTree recalculated below.
  bool ReplaceEntryBB = false;
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    ReplaceEntryBB = true;

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
    for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) {
      Updates.push_back({DominatorTree::Delete, *I, PredBB});
      // This predecessor of PredBB may already have DestBB as a successor.
      if (llvm::find(successors(*I), DestBB) == succ_end(*I))
        Updates.push_back({DominatorTree::Insert, *I, DestBB});
    }
  }

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
  // Leave PredBB with a lone 'unreachable' so it is a valid (successor-free)
  // block until the DTU updates are applied or it is erased below.
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(PredBB);

  if (DTU) {
    assert(PredBB->getInstList().size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(*(DestBB->getParent()));
    }
  }

  else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}
757 | ||||
758 | /// CanMergeValues - Return true if we can choose one of these values to use | |||
759 | /// in place of the other. Note that we will always choose the non-undef | |||
760 | /// value to keep. | |||
761 | static bool CanMergeValues(Value *First, Value *Second) { | |||
762 | return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second); | |||
763 | } | |||
764 | ||||
/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      // Conflict check: for every predecessor common to BB and Succ, the
      // value BBPN feeds from it must be mergeable with what PN already has.
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  // No phi in Succ conflicts with the values flowing in from BB's
  // predecessors; the blocks can be folded.
  return true;
}
825 | ||||
/// The predecessor blocks of a basic block being folded into its successor.
using PredBlockVector = SmallVector<BasicBlock *, 16>;
/// Maps a phi's incoming block to the value it contributes along that edge.
using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
828 | ||||
/// Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    // Record the concrete value for BB so later undef inputs from the same
    // block can be rewritten to it (insert is a no-op if BB is already known).
    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  // OldVal is undef: prefer a concrete value previously recorded for BB.
  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  // Nothing better is known; keep the undef.
  return OldVal;
}
857 | ||||
858 | /// Create a map from block to value for the operands of a | |||
859 | /// given phi. | |||
860 | /// | |||
861 | /// Create a map from block to value for each non-undef value flowing | |||
862 | /// into \p PN. | |||
863 | /// | |||
864 | /// \param PN The phi we are collecting the map for. | |||
865 | /// \param IncomingValues [out] The map from block to value for this phi. | |||
866 | static void gatherIncomingValuesToPhi(PHINode *PN, | |||
867 | IncomingValueMap &IncomingValues) { | |||
868 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
869 | BasicBlock *BB = PN->getIncomingBlock(i); | |||
870 | Value *V = PN->getIncomingValue(i); | |||
871 | ||||
872 | if (!isa<UndefValue>(V)) | |||
873 | IncomingValues.insert(std::make_pair(BB, V)); | |||
874 | } | |||
875 | } | |||
876 | ||||
877 | /// Replace the incoming undef values to a phi with the values | |||
878 | /// from a block-to-value map. | |||
879 | /// | |||
880 | /// \param PN The phi we are replacing the undefs in. | |||
881 | /// \param IncomingValues A map from block to value. | |||
882 | static void replaceUndefValuesInPhi(PHINode *PN, | |||
883 | const IncomingValueMap &IncomingValues) { | |||
884 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
885 | Value *V = PN->getIncomingValue(i); | |||
886 | ||||
887 | if (!isa<UndefValue>(V)) continue; | |||
888 | ||||
889 | BasicBlock *BB = PN->getIncomingBlock(i); | |||
890 | IncomingValueMap::const_iterator It = IncomingValues.find(BB); | |||
891 | if (It == IncomingValues.end()) continue; | |||
892 | ||||
893 | PN->setIncomingValue(i, It->second); | |||
894 | } | |||
895 | } | |||
896 | ||||
/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  // Drop BB's entry from PN; the 'false' keeps PN alive even if it ends up
  // with no incoming values, since we re-add entries below.
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    // OldVal is defined outside BB (or is a constant): every predecessor of
    // BB contributes the same value to PN.
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  // Finally make any remaining undef inputs consistent with the concrete
  // values we collected for their blocks.
  replaceUndefValuesInPhi(PN, IncomingValues);
}
958 | ||||
/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potential side-effect free intrinsics and the branch. If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true. If we can't transform, return false.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      // The only tolerated uses of BB's phis are by phis in other blocks,
      // via edges that do not come from BB itself.
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  // We cannot fold the block if it's a branch to an already present callbr
  // successor because that creates duplicate successors.
  for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    if (auto *CBI = dyn_cast<CallBrInst>((*I)->getTerminator())) {
      if (Succ == CBI->getDefaultDest())
        return false;
      for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
        if (Succ == CBI->getIndirectDest(i))
          return false;
    }
  }

  LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  // Prepare the DomTree edge updates before mutating the CFG.
  SmallVector<DominatorTree::UpdateType, 32> Updates;
  if (DTU) {
    Updates.push_back({DominatorTree::Delete, BB, Succ});
    // All predecessors of BB will be moved to Succ.
    for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
      Updates.push_back({DominatorTree::Delete, *I, BB});
      // This predecessor of BB may already have Succ as a successor.
      if (llvm::find(successors(*I), Succ) == succ_end(*I))
        Updates.push_back({DominatorTree::Insert, *I, Succ});
    }
  }

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
  Instruction *TI = BB->getTerminator();
  // TI may be null: the single-predecessor path above spliced BB's contents
  // (terminator already erased) into Succ.
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
        BasicBlock *Pred = *PI;
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
      }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);

  // Clear the successor list of BB to match updates applying to DTU later.
  if (BB->getTerminator())
    BB->getInstList().pop_back();
  new UnreachableInst(BB->getContext(), BB);
  assert(succ_empty(BB) && "The successor list of BB isn't empty before "
         "applying corresponding DTU updates.");

  if (DTU) {
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(BB);
  } else {
    BB->eraseFromParent(); // Delete the old basic block.
  }
  return true;
}
1090 | ||||
1091 | /// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI | |||
1092 | /// nodes in this block. This doesn't try to be clever about PHI nodes | |||
1093 | /// which differ only in the order of the incoming values, but instcombine | |||
1094 | /// orders them so it usually won't matter. | |||
1095 | bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) { | |||
1096 | // This implementation doesn't currently consider undef operands | |||
1097 | // specially. Theoretically, two phis which are identical except for | |||
1098 | // one having an undef where the other doesn't could be collapsed. | |||
1099 | ||||
1100 | struct PHIDenseMapInfo { | |||
1101 | static PHINode *getEmptyKey() { | |||
1102 | return DenseMapInfo<PHINode *>::getEmptyKey(); | |||
1103 | } | |||
1104 | ||||
1105 | static PHINode *getTombstoneKey() { | |||
1106 | return DenseMapInfo<PHINode *>::getTombstoneKey(); | |||
1107 | } | |||
1108 | ||||
1109 | static unsigned getHashValue(PHINode *PN) { | |||
1110 | // Compute a hash value on the operands. Instcombine will likely have | |||
1111 | // sorted them, which helps expose duplicates, but we have to check all | |||
1112 | // the operands to be safe in case instcombine hasn't run. | |||
1113 | return static_cast<unsigned>(hash_combine( | |||
1114 | hash_combine_range(PN->value_op_begin(), PN->value_op_end()), | |||
1115 | hash_combine_range(PN->block_begin(), PN->block_end()))); | |||
1116 | } | |||
1117 | ||||
1118 | static bool isEqual(PHINode *LHS, PHINode *RHS) { | |||
1119 | if (LHS == getEmptyKey() || LHS == getTombstoneKey() || | |||
1120 | RHS == getEmptyKey() || RHS == getTombstoneKey()) | |||
1121 | return LHS == RHS; | |||
1122 | return LHS->isIdenticalTo(RHS); | |||
1123 | } | |||
1124 | }; | |||
1125 | ||||
1126 | // Set of unique PHINodes. | |||
1127 | DenseSet<PHINode *, PHIDenseMapInfo> PHISet; | |||
1128 | ||||
1129 | // Examine each PHI. | |||
1130 | bool Changed = false; | |||
1131 | for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) { | |||
1132 | auto Inserted = PHISet.insert(PN); | |||
1133 | if (!Inserted.second) { | |||
1134 | // A duplicate. Replace this PHI with its duplicate. | |||
1135 | PN->replaceAllUsesWith(*Inserted.first); | |||
1136 | PN->eraseFromParent(); | |||
1137 | Changed = true; | |||
1138 | ||||
1139 | // The RAUW can change PHIs that we already visited. Start over from the | |||
1140 | // beginning. | |||
1141 | PHISet.clear(); | |||
1142 | I = BB->begin(); | |||
1143 | } | |||
1144 | } | |||
1145 | ||||
1146 | return Changed; | |||
1147 | } | |||
1148 | ||||
/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
/// \returns the alignment that is now known for \p V (PrefAlign when the
/// underlying object's alignment could be raised, otherwise the best
/// already-known alignment).
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign,
                                      const DataLayout &DL) {
  assert(PrefAlign > Align);

  // Look through bitcasts/GEP-of-zero etc. to the underlying object.
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: ideally, computeKnownBits ought to have used
    // AllocaInst::getAlignment() in its computation already, making
    // the below max redundant. But, as it turns out,
    // stripPointerCasts recurses through infinite layers of bitcasts,
    // while computeKnownBits is not allowed to traverse more than 6
    // levels.
    Align = std::max(AI->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align = std::max(GO->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return Align;

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  // Neither an alloca nor a global object we control: the known alignment
  // cannot be improved.
  return Align;
}
1199 | ||||
1200 | unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, | |||
1201 | const DataLayout &DL, | |||
1202 | const Instruction *CxtI, | |||
1203 | AssumptionCache *AC, | |||
1204 | const DominatorTree *DT) { | |||
1205 | assert(V->getType()->isPointerTy() &&((V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!" ) ? static_cast<void> (0) : __assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1206, __PRETTY_FUNCTION__)) | |||
1206 | "getOrEnforceKnownAlignment expects a pointer!")((V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!" ) ? static_cast<void> (0) : __assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1206, __PRETTY_FUNCTION__)); | |||
1207 | ||||
1208 | KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT); | |||
1209 | unsigned TrailZ = Known.countMinTrailingZeros(); | |||
1210 | ||||
1211 | // Avoid trouble with ridiculously large TrailZ values, such as | |||
1212 | // those computed from a null pointer. | |||
1213 | TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT8 - 1)); | |||
1214 | ||||
1215 | unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ); | |||
1216 | ||||
1217 | // LLVM doesn't support alignments larger than this currently. | |||
1218 | Align = std::min(Align, +Value::MaximumAlignment); | |||
1219 | ||||
1220 | if (PrefAlign > Align) | |||
1221 | Align = enforceKnownAlignment(V, Align, PrefAlign, DL); | |||
1222 | ||||
1223 | // We don't need to make any adjustment. | |||
1224 | return Align; | |||
1225 | } | |||
1226 | ||||
1227 | ///===---------------------------------------------------------------------===// | |||
1228 | /// Dbg Intrinsic utilities | |||
1229 | /// | |||
1230 | ||||
1231 | /// See if there is a dbg.value intrinsic for DIVar before I. | |||
1232 | static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, | |||
1233 | Instruction *I) { | |||
1234 | // Since we can't guarantee that the original dbg.declare instrinsic | |||
1235 | // is removed by LowerDbgDeclare(), we need to make sure that we are | |||
1236 | // not inserting the same dbg.value intrinsic over and over. | |||
1237 | BasicBlock::InstListType::iterator PrevI(I); | |||
1238 | if (PrevI != I->getParent()->getInstList().begin()) { | |||
1239 | --PrevI; | |||
1240 | if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI)) | |||
1241 | if (DVI->getValue() == I->getOperand(0) && | |||
1242 | DVI->getVariable() == DIVar && | |||
1243 | DVI->getExpression() == DIExpr) | |||
1244 | return true; | |||
1245 | } | |||
1246 | return false; | |||
1247 | } | |||
1248 | ||||
1249 | /// See if there is a dbg.value intrinsic for DIVar for the PHI node. | |||
1250 | static bool PhiHasDebugValue(DILocalVariable *DIVar, | |||
1251 | DIExpression *DIExpr, | |||
1252 | PHINode *APN) { | |||
1253 | // Since we can't guarantee that the original dbg.declare instrinsic | |||
1254 | // is removed by LowerDbgDeclare(), we need to make sure that we are | |||
1255 | // not inserting the same dbg.value intrinsic over and over. | |||
1256 | SmallVector<DbgValueInst *, 1> DbgValues; | |||
1257 | findDbgValues(DbgValues, APN); | |||
1258 | for (auto *DVI : DbgValues) { | |||
1259 | assert(DVI->getValue() == APN)((DVI->getValue() == APN) ? static_cast<void> (0) : __assert_fail ("DVI->getValue() == APN", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1259, __PRETTY_FUNCTION__)); | |||
1260 | if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr)) | |||
1261 | return true; | |||
1262 | } | |||
1263 | return false; | |||
1264 | } | |||
1265 | ||||
1266 | /// Check if the alloc size of \p ValTy is large enough to cover the variable | |||
1267 | /// (or fragment of the variable) described by \p DII. | |||
1268 | /// | |||
1269 | /// This is primarily intended as a helper for the different | |||
1270 | /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is | |||
1271 | /// converted describes an alloca'd variable, so we need to use the | |||
1272 | /// alloc size of the value when doing the comparison. E.g. an i1 value will be | |||
1273 | /// identified as covering an n-bit fragment, if the store size of i1 is at | |||
1274 | /// least n bits. | |||
1275 | static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) { | |||
1276 | const DataLayout &DL = DII->getModule()->getDataLayout(); | |||
1277 | uint64_t ValueSize = DL.getTypeAllocSizeInBits(ValTy); | |||
1278 | if (auto FragmentSize = DII->getFragmentSizeInBits()) | |||
1279 | return ValueSize >= *FragmentSize; | |||
1280 | // We can't always calculate the size of the DI variable (e.g. if it is a | |||
1281 | // VLA). Try to use the size of the alloca that the dbg intrinsic describes | |||
1282 | // intead. | |||
1283 | if (DII->isAddressOfVariable()) | |||
1284 | if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocation())) | |||
1285 | if (auto FragmentSize = AI->getAllocationSizeInBits(DL)) | |||
1286 | return ValueSize >= *FragmentSize; | |||
1287 | // Could not determine size of variable. Conservatively return false. | |||
1288 | return false; | |||
1289 | } | |||
1290 | ||||
1291 | /// Produce a DebugLoc to use for each dbg.declare/inst pair that are promoted | |||
1292 | /// to a dbg.value. Because no machine insts can come from debug intrinsics, | |||
1293 | /// only the scope and inlinedAt is significant. Zero line numbers are used in | |||
1294 | /// case this DebugLoc leaks into any adjacent instructions. | |||
1295 | static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) { | |||
1296 | // Original dbg.declare must have a location. | |||
1297 | DebugLoc DeclareLoc = DII->getDebugLoc(); | |||
1298 | MDNode *Scope = DeclareLoc.getScope(); | |||
1299 | DILocation *InlinedAt = DeclareLoc.getInlinedAt(); | |||
1300 | // Produce an unknown location with the correct scope / inlinedAt fields. | |||
1301 | return DebugLoc::get(0, 0, Scope, InlinedAt); | |||
1302 | } | |||
1303 | ||||
1304 | /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value | |||
1305 | /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic. | |||
1306 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, | |||
1307 | StoreInst *SI, DIBuilder &Builder) { | |||
1308 | assert(DII->isAddressOfVariable())((DII->isAddressOfVariable()) ? static_cast<void> (0 ) : __assert_fail ("DII->isAddressOfVariable()", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1308, __PRETTY_FUNCTION__)); | |||
1309 | auto *DIVar = DII->getVariable(); | |||
1310 | assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void> (0) : __assert_fail ("DIVar && \"Missing variable\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1310, __PRETTY_FUNCTION__)); | |||
1311 | auto *DIExpr = DII->getExpression(); | |||
1312 | Value *DV = SI->getValueOperand(); | |||
1313 | ||||
1314 | DebugLoc NewLoc = getDebugValueLoc(DII, SI); | |||
1315 | ||||
1316 | if (!valueCoversEntireFragment(DV->getType(), DII)) { | |||
1317 | // FIXME: If storing to a part of the variable described by the dbg.declare, | |||
1318 | // then we want to insert a dbg.value for the corresponding fragment. | |||
1319 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII << '\n'; } } while (false) | |||
1320 | << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII << '\n'; } } while (false); | |||
1321 | // For now, when there is a store to parts of the variable (but we do not | |||
1322 | // know which part) we insert an dbg.value instrinsic to indicate that we | |||
1323 | // know nothing about the variable's content. | |||
1324 | DV = UndefValue::get(DV->getType()); | |||
1325 | if (!LdStHasDebugValue(DIVar, DIExpr, SI)) | |||
1326 | Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI); | |||
1327 | return; | |||
1328 | } | |||
1329 | ||||
1330 | if (!LdStHasDebugValue(DIVar, DIExpr, SI)) | |||
1331 | Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI); | |||
1332 | } | |||
1333 | ||||
1334 | /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value | |||
1335 | /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic. | |||
1336 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, | |||
1337 | LoadInst *LI, DIBuilder &Builder) { | |||
1338 | auto *DIVar = DII->getVariable(); | |||
1339 | auto *DIExpr = DII->getExpression(); | |||
1340 | assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void> (0) : __assert_fail ("DIVar && \"Missing variable\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1340, __PRETTY_FUNCTION__)); | |||
1341 | ||||
1342 | if (LdStHasDebugValue(DIVar, DIExpr, LI)) | |||
1343 | return; | |||
1344 | ||||
1345 | if (!valueCoversEntireFragment(LI->getType(), DII)) { | |||
1346 | // FIXME: If only referring to a part of the variable described by the | |||
1347 | // dbg.declare, then we want to insert a dbg.value for the corresponding | |||
1348 | // fragment. | |||
1349 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII << '\n'; } } while (false) | |||
1350 | << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII << '\n'; } } while (false); | |||
1351 | return; | |||
1352 | } | |||
1353 | ||||
1354 | DebugLoc NewLoc = getDebugValueLoc(DII, nullptr); | |||
1355 | ||||
1356 | // We are now tracking the loaded value instead of the address. In the | |||
1357 | // future if multi-location support is added to the IR, it might be | |||
1358 | // preferable to keep tracking both the loaded value and the original | |||
1359 | // address in case the alloca can not be elided. | |||
1360 | Instruction *DbgValue = Builder.insertDbgValueIntrinsic( | |||
1361 | LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr); | |||
1362 | DbgValue->insertAfter(LI); | |||
1363 | } | |||
1364 | ||||
1365 | /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated | |||
1366 | /// llvm.dbg.declare or llvm.dbg.addr intrinsic. | |||
1367 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, | |||
1368 | PHINode *APN, DIBuilder &Builder) { | |||
1369 | auto *DIVar = DII->getVariable(); | |||
1370 | auto *DIExpr = DII->getExpression(); | |||
1371 | assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void> (0) : __assert_fail ("DIVar && \"Missing variable\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1371, __PRETTY_FUNCTION__)); | |||
1372 | ||||
1373 | if (PhiHasDebugValue(DIVar, DIExpr, APN)) | |||
1374 | return; | |||
1375 | ||||
1376 | if (!valueCoversEntireFragment(APN->getType(), DII)) { | |||
1377 | // FIXME: If only referring to a part of the variable described by the | |||
1378 | // dbg.declare, then we want to insert a dbg.value for the corresponding | |||
1379 | // fragment. | |||
1380 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII << '\n'; } } while (false) | |||
1381 | << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII << '\n'; } } while (false); | |||
1382 | return; | |||
1383 | } | |||
1384 | ||||
1385 | BasicBlock *BB = APN->getParent(); | |||
1386 | auto InsertionPt = BB->getFirstInsertionPt(); | |||
1387 | ||||
1388 | DebugLoc NewLoc = getDebugValueLoc(DII, nullptr); | |||
1389 | ||||
1390 | // The block may be a catchswitch block, which does not have a valid | |||
1391 | // insertion point. | |||
1392 | // FIXME: Insert dbg.value markers in the successors when appropriate. | |||
1393 | if (InsertionPt != BB->end()) | |||
1394 | Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt); | |||
1395 | } | |||
1396 | ||||
1397 | /// Determine whether this alloca is either a VLA or an array. | |||
1398 | static bool isArray(AllocaInst *AI) { | |||
1399 | return AI->isArrayAllocation() || | |||
1400 | AI->getType()->getElementType()->isArrayTy(); | |||
1401 | } | |||
1402 | ||||
1403 | /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set | |||
1404 | /// of llvm.dbg.value intrinsics. | |||
1405 | bool llvm::LowerDbgDeclare(Function &F) { | |||
1406 | DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); | |||
1407 | SmallVector<DbgDeclareInst *, 4> Dbgs; | |||
1408 | for (auto &FI : F) | |||
1409 | for (Instruction &BI : FI) | |||
1410 | if (auto DDI = dyn_cast<DbgDeclareInst>(&BI)) | |||
1411 | Dbgs.push_back(DDI); | |||
1412 | ||||
1413 | if (Dbgs.empty()) | |||
1414 | return false; | |||
1415 | ||||
1416 | for (auto &I : Dbgs) { | |||
1417 | DbgDeclareInst *DDI = I; | |||
1418 | AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress()); | |||
1419 | // If this is an alloca for a scalar variable, insert a dbg.value | |||
1420 | // at each load and store to the alloca and erase the dbg.declare. | |||
1421 | // The dbg.values allow tracking a variable even if it is not | |||
1422 | // stored on the stack, while the dbg.declare can only describe | |||
1423 | // the stack slot (and at a lexical-scope granularity). Later | |||
1424 | // passes will attempt to elide the stack slot. | |||
1425 | if (!AI || isArray(AI)) | |||
1426 | continue; | |||
1427 | ||||
1428 | // A volatile load/store means that the alloca can't be elided anyway. | |||
1429 | if (llvm::any_of(AI->users(), [](User *U) -> bool { | |||
1430 | if (LoadInst *LI = dyn_cast<LoadInst>(U)) | |||
1431 | return LI->isVolatile(); | |||
1432 | if (StoreInst *SI = dyn_cast<StoreInst>(U)) | |||
1433 | return SI->isVolatile(); | |||
1434 | return false; | |||
1435 | })) | |||
1436 | continue; | |||
1437 | ||||
1438 | for (auto &AIUse : AI->uses()) { | |||
1439 | User *U = AIUse.getUser(); | |||
1440 | if (StoreInst *SI = dyn_cast<StoreInst>(U)) { | |||
1441 | if (AIUse.getOperandNo() == 1) | |||
1442 | ConvertDebugDeclareToDebugValue(DDI, SI, DIB); | |||
1443 | } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { | |||
1444 | ConvertDebugDeclareToDebugValue(DDI, LI, DIB); | |||
1445 | } else if (CallInst *CI = dyn_cast<CallInst>(U)) { | |||
1446 | // This is a call by-value or some other instruction that takes a | |||
1447 | // pointer to the variable. Insert a *value* intrinsic that describes | |||
1448 | // the variable by dereferencing the alloca. | |||
1449 | DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr); | |||
1450 | auto *DerefExpr = | |||
1451 | DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref); | |||
1452 | DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr, NewLoc, | |||
1453 | CI); | |||
1454 | } | |||
1455 | } | |||
1456 | DDI->eraseFromParent(); | |||
1457 | } | |||
1458 | return true; | |||
1459 | } | |||
1460 | ||||
1461 | /// Propagate dbg.value intrinsics through the newly inserted PHIs. | |||
1462 | void llvm::insertDebugValuesForPHIs(BasicBlock *BB, | |||
1463 | SmallVectorImpl<PHINode *> &InsertedPHIs) { | |||
1464 | assert(BB && "No BasicBlock to clone dbg.value(s) from.")((BB && "No BasicBlock to clone dbg.value(s) from.") ? static_cast<void> (0) : __assert_fail ("BB && \"No BasicBlock to clone dbg.value(s) from.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1464, __PRETTY_FUNCTION__)); | |||
1465 | if (InsertedPHIs.size() == 0) | |||
1466 | return; | |||
1467 | ||||
1468 | // Map existing PHI nodes to their dbg.values. | |||
1469 | ValueToValueMapTy DbgValueMap; | |||
1470 | for (auto &I : *BB) { | |||
1471 | if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) { | |||
1472 | if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation())) | |||
1473 | DbgValueMap.insert({Loc, DbgII}); | |||
1474 | } | |||
1475 | } | |||
1476 | if (DbgValueMap.size() == 0) | |||
1477 | return; | |||
1478 | ||||
1479 | // Then iterate through the new PHIs and look to see if they use one of the | |||
1480 | // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will | |||
1481 | // propagate the info through the new PHI. | |||
1482 | LLVMContext &C = BB->getContext(); | |||
1483 | for (auto PHI : InsertedPHIs) { | |||
1484 | BasicBlock *Parent = PHI->getParent(); | |||
1485 | // Avoid inserting an intrinsic into an EH block. | |||
1486 | if (Parent->getFirstNonPHI()->isEHPad()) | |||
1487 | continue; | |||
1488 | auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI)); | |||
1489 | for (auto VI : PHI->operand_values()) { | |||
1490 | auto V = DbgValueMap.find(VI); | |||
1491 | if (V != DbgValueMap.end()) { | |||
1492 | auto *DbgII = cast<DbgVariableIntrinsic>(V->second); | |||
1493 | Instruction *NewDbgII = DbgII->clone(); | |||
1494 | NewDbgII->setOperand(0, PhiMAV); | |||
1495 | auto InsertionPt = Parent->getFirstInsertionPt(); | |||
1496 | assert(InsertionPt != Parent->end() && "Ill-formed basic block")((InsertionPt != Parent->end() && "Ill-formed basic block" ) ? static_cast<void> (0) : __assert_fail ("InsertionPt != Parent->end() && \"Ill-formed basic block\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1496, __PRETTY_FUNCTION__)); | |||
1497 | NewDbgII->insertBefore(&*InsertionPt); | |||
1498 | } | |||
1499 | } | |||
1500 | } | |||
1501 | } | |||
1502 | ||||
1503 | /// Finds all intrinsics declaring local variables as living in the memory that | |||
1504 | /// 'V' points to. This may include a mix of dbg.declare and | |||
1505 | /// dbg.addr intrinsics. | |||
1506 | TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) { | |||
1507 | // This function is hot. Check whether the value has any metadata to avoid a | |||
1508 | // DenseMap lookup. | |||
1509 | if (!V->isUsedByMetadata()) | |||
1510 | return {}; | |||
1511 | auto *L = LocalAsMetadata::getIfExists(V); | |||
1512 | if (!L) | |||
1513 | return {}; | |||
1514 | auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L); | |||
1515 | if (!MDV) | |||
1516 | return {}; | |||
1517 | ||||
1518 | TinyPtrVector<DbgVariableIntrinsic *> Declares; | |||
1519 | for (User *U : MDV->users()) { | |||
1520 | if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U)) | |||
1521 | if (DII->isAddressOfVariable()) | |||
1522 | Declares.push_back(DII); | |||
1523 | } | |||
1524 | ||||
1525 | return Declares; | |||
1526 | } | |||
1527 | ||||
1528 | void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) { | |||
1529 | // This function is hot. Check whether the value has any metadata to avoid a | |||
1530 | // DenseMap lookup. | |||
1531 | if (!V->isUsedByMetadata()) | |||
1532 | return; | |||
1533 | if (auto *L = LocalAsMetadata::getIfExists(V)) | |||
1534 | if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) | |||
1535 | for (User *U : MDV->users()) | |||
1536 | if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) | |||
1537 | DbgValues.push_back(DVI); | |||
1538 | } | |||
1539 | ||||
1540 | void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers, | |||
1541 | Value *V) { | |||
1542 | // This function is hot. Check whether the value has any metadata to avoid a | |||
1543 | // DenseMap lookup. | |||
1544 | if (!V->isUsedByMetadata()) | |||
1545 | return; | |||
1546 | if (auto *L = LocalAsMetadata::getIfExists(V)) | |||
1547 | if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) | |||
1548 | for (User *U : MDV->users()) | |||
1549 | if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U)) | |||
1550 | DbgUsers.push_back(DII); | |||
1551 | } | |||
1552 | ||||
1553 | bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress, | |||
1554 | Instruction *InsertBefore, DIBuilder &Builder, | |||
1555 | uint8_t DIExprFlags, int Offset) { | |||
1556 | auto DbgAddrs = FindDbgAddrUses(Address); | |||
1557 | for (DbgVariableIntrinsic *DII : DbgAddrs) { | |||
1558 | DebugLoc Loc = DII->getDebugLoc(); | |||
1559 | auto *DIVar = DII->getVariable(); | |||
1560 | auto *DIExpr = DII->getExpression(); | |||
1561 | assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void> (0) : __assert_fail ("DIVar && \"Missing variable\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1561, __PRETTY_FUNCTION__)); | |||
1562 | DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset); | |||
1563 | // Insert llvm.dbg.declare immediately before InsertBefore, and remove old | |||
1564 | // llvm.dbg.declare. | |||
1565 | Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore); | |||
1566 | if (DII == InsertBefore) | |||
1567 | InsertBefore = InsertBefore->getNextNode(); | |||
1568 | DII->eraseFromParent(); | |||
1569 | } | |||
1570 | return !DbgAddrs.empty(); | |||
1571 | } | |||
1572 | ||||
1573 | bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress, | |||
1574 | DIBuilder &Builder, uint8_t DIExprFlags, | |||
1575 | int Offset) { | |||
1576 | return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder, | |||
1577 | DIExprFlags, Offset); | |||
1578 | } | |||
1579 | ||||
1580 | static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress, | |||
1581 | DIBuilder &Builder, int Offset) { | |||
1582 | DebugLoc Loc = DVI->getDebugLoc(); | |||
1583 | auto *DIVar = DVI->getVariable(); | |||
1584 | auto *DIExpr = DVI->getExpression(); | |||
1585 | assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void> (0) : __assert_fail ("DIVar && \"Missing variable\"" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 1585, __PRETTY_FUNCTION__)); | |||
1586 | ||||
1587 | // This is an alloca-based llvm.dbg.value. The first thing it should do with | |||
1588 | // the alloca pointer is dereference it. Otherwise we don't know how to handle | |||
1589 | // it and give up. | |||
1590 | if (!DIExpr || DIExpr->getNumElements() < 1 || | |||
1591 | DIExpr->getElement(0) != dwarf::DW_OP_deref) | |||
1592 | return; | |||
1593 | ||||
1594 | // Insert the offset immediately after the first deref. | |||
1595 | // We could just change the offset argument of dbg.value, but it's unsigned... | |||
1596 | if (Offset) { | |||
1597 | SmallVector<uint64_t, 4> Ops; | |||
1598 | Ops.push_back(dwarf::DW_OP_deref); | |||
1599 | DIExpression::appendOffset(Ops, Offset); | |||
1600 | Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end()); | |||
1601 | DIExpr = Builder.createExpression(Ops); | |||
1602 | } | |||
1603 | ||||
1604 | Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI); | |||
1605 | DVI->eraseFromParent(); | |||
1606 | } | |||
1607 | ||||
1608 | void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, | |||
1609 | DIBuilder &Builder, int Offset) { | |||
1610 | if (auto *L = LocalAsMetadata::getIfExists(AI)) | |||
1611 | if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L)) | |||
1612 | for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) { | |||
1613 | Use &U = *UI++; | |||
1614 | if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser())) | |||
1615 | replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset); | |||
1616 | } | |||
1617 | } | |||
1618 | ||||
1619 | /// Wrap \p V in a ValueAsMetadata instance. | |||
1620 | static MetadataAsValue *wrapValueInMetadata(LLVMContext &C, Value *V) { | |||
1621 | return MetadataAsValue::get(C, ValueAsMetadata::get(V)); | |||
1622 | } | |||
1623 | ||||
1624 | bool llvm::salvageDebugInfo(Instruction &I) { | |||
1625 | SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; | |||
1626 | findDbgUsers(DbgUsers, &I); | |||
1627 | if (DbgUsers.empty()) | |||
1628 | return false; | |||
1629 | ||||
1630 | return salvageDebugInfoForDbgValues(I, DbgUsers); | |||
1631 | } | |||
1632 | ||||
1633 | bool llvm::salvageDebugInfoForDbgValues( | |||
1634 | Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) { | |||
1635 | auto &Ctx = I.getContext(); | |||
1636 | auto wrapMD = [&](Value *V) { return wrapValueInMetadata(Ctx, V); }; | |||
1637 | ||||
1638 | for (auto *DII : DbgUsers) { | |||
1639 | // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they | |||
1640 | // are implicitly pointing out the value as a DWARF memory location | |||
1641 | // description. | |||
1642 | bool StackValue = isa<DbgValueInst>(DII); | |||
1643 | ||||
1644 | DIExpression *DIExpr = | |||
1645 | salvageDebugInfoImpl(I, DII->getExpression(), StackValue); | |||
1646 | ||||
1647 | // salvageDebugInfoImpl should fail on examining the first element of | |||
1648 | // DbgUsers, or none of them. | |||
1649 | if (!DIExpr) | |||
1650 | return false; | |||
1651 | ||||
1652 | DII->setOperand(0, wrapMD(I.getOperand(0))); | |||
1653 | DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr)); | |||
1654 | LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "SALVAGE: " << *DII << '\n'; } } while (false); | |||
1655 | } | |||
1656 | ||||
1657 | return true; | |||
1658 | } | |||
1659 | ||||
/// Compute a DIExpression describing I's effect on SrcDIExpr, so that a debug
/// user of I can instead describe I's operand. Returns nullptr when I is not
/// a salvageable cast, constant GEP, or binop-with-constant-RHS.
/// \p WithStackValue requests a DW_OP_stack_value-terminated expression.
DIExpression *llvm::salvageDebugInfoImpl(Instruction &I,
                                         DIExpression *SrcDIExpr,
                                         bool WithStackValue) {
  auto &M = *I.getModule();
  auto &DL = M.getDataLayout();

  // Apply a vector of opcodes to the source DIExpression.
  auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
    DIExpression *DIExpr = SrcDIExpr;
    if (!Ops.empty()) {
      DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
    }
    return DIExpr;
  };

  // Apply the given offset to the source DIExpression.
  auto applyOffset = [&](uint64_t Offset) -> DIExpression * {
    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    return doSalvage(Ops);
  };

  // initializer-list helper for applying operators to the source DIExpression.
  auto applyOps =
      [&](std::initializer_list<uint64_t> Opcodes) -> DIExpression * {
    SmallVector<uint64_t, 8> Ops(Opcodes);
    return doSalvage(Ops);
  };

  if (auto *CI = dyn_cast<CastInst>(&I)) {
    // No-op casts and zexts are irrelevant for debug info.
    if (CI->isNoopCast(DL) || isa<ZExtInst>(&I))
      return SrcDIExpr;
    // Other casts (truncs, sexts, fp casts, ...) are not representable here.
    return nullptr;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    unsigned BitWidth =
        M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
    // Rewrite a constant GEP into a DIExpression.
    APInt Offset(BitWidth, 0);
    if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) {
      return applyOffset(Offset.getSExtValue());
    } else {
      // Non-constant GEP offsets cannot be expressed as a DIExpression.
      return nullptr;
    }
  } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
    // Rewrite binary operations with constant integer operands.
    auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1));
    if (!ConstInt || ConstInt->getBitWidth() > 64)
      return nullptr;

    uint64_t Val = ConstInt->getSExtValue();
    switch (BI->getOpcode()) {
    case Instruction::Add:
      return applyOffset(Val);
    case Instruction::Sub:
      return applyOffset(-int64_t(Val));
    case Instruction::Mul:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul});
    case Instruction::SDiv:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_div});
    case Instruction::SRem:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod});
    case Instruction::Or:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_or});
    case Instruction::And:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_and});
    case Instruction::Xor:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor});
    case Instruction::Shl:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl});
    case Instruction::LShr:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr});
    case Instruction::AShr:
      return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra});
    default:
      // TODO: Salvage constants from each kind of binop we know about.
      return nullptr;
    }
    // *Not* to do: we should not attempt to salvage load instructions,
    // because the validity and lifetime of a dbg.value containing
    // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
  }
  return nullptr;
}
1744 | ||||
1745 | /// A replacement for a dbg.value expression. | |||
1746 | using DbgValReplacement = Optional<DIExpression *>; | |||
1747 | ||||
/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
/// possibly moving/deleting users to prevent use-before-def. Returns true if
/// changes are made.
///
/// \p DomPoint is the instruction at which the replacement value \p To is
/// known to be available; debug users not dominated by it cannot simply be
/// retargeted and are instead moved, salvaged, or deleted.
/// \p RewriteExpr maps each debug intrinsic to the DIExpression it should
/// carry after the rewrite; returning None skips that user.
static bool rewriteDebugUsers(
    Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
    function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
  // Find debug users of From.
  SmallVector<DbgVariableIntrinsic *, 1> Users;
  findDbgUsers(Users, &From);
  if (Users.empty())
    return false;

  // Prevent use-before-def of To. Only instructions can introduce a
  // use-before-def; constants and arguments are available everywhere.
  bool Changed = false;
  SmallPtrSet<DbgVariableIntrinsic *, 1> DeleteOrSalvage;
  if (isa<Instruction>(&To)) {
    bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;

    for (auto *DII : Users) {
      // It's common to see a debug user between From and DomPoint. Move it
      // after DomPoint to preserve the variable update without any reordering.
      if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
        LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
        DII->moveAfter(&DomPoint);
        Changed = true;

      // Users which otherwise aren't dominated by the replacement value must
      // be salvaged or deleted.
      } else if (!DT.dominates(&DomPoint, DII)) {
        DeleteOrSalvage.insert(DII);
      }
    }
  }

  // Update debug users without use-before-def risk.
  for (auto *DII : Users) {
    if (DeleteOrSalvage.count(DII))
      continue;

    LLVMContext &Ctx = DII->getContext();
    DbgValReplacement DVR = RewriteExpr(*DII);
    if (!DVR)
      continue; // The callback declined to produce a replacement expression.

    // Operand 0 is the location operand; operand 2 is the DIExpression.
    DII->setOperand(0, wrapValueInMetadata(Ctx, &To));
    DII->setOperand(2, MetadataAsValue::get(Ctx, *DVR));
    LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
    Changed = true;
  }

  if (!DeleteOrSalvage.empty()) {
    // Try to salvage the remaining debug users.
    Changed |= salvageDebugInfo(From);

    // Delete the debug users which weren't salvaged.
    for (auto *DII : DeleteOrSalvage) {
      // Salvaging rewrites the location away from From; any user still
      // pointing at From could not be salvaged and must go.
      if (DII->getVariableLocation() == &From) {
        LLVM_DEBUG(dbgs() << "Erased UseBeforeDef: " << *DII << '\n');
        DII->eraseFromParent();
        Changed = true;
      }
    }
  }

  return Changed;
}
1814 | ||||
1815 | /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would | |||
1816 | /// losslessly preserve the bits and semantics of the value. This predicate is | |||
1817 | /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result. | |||
1818 | /// | |||
1819 | /// Note that Type::canLosslesslyBitCastTo is not suitable here because it | |||
1820 | /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>, | |||
1821 | /// and also does not allow lossless pointer <-> integer conversions. | |||
1822 | static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, | |||
1823 | Type *ToTy) { | |||
1824 | // Trivially compatible types. | |||
1825 | if (FromTy == ToTy) | |||
1826 | return true; | |||
1827 | ||||
1828 | // Handle compatible pointer <-> integer conversions. | |||
1829 | if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) { | |||
1830 | bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy); | |||
1831 | bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) && | |||
1832 | !DL.isNonIntegralPointerType(ToTy); | |||
1833 | return SameSize && LosslessConversion; | |||
1834 | } | |||
1835 | ||||
1836 | // TODO: This is not exhaustive. | |||
1837 | return false; | |||
1838 | } | |||
1839 | ||||
/// Point debug users of \p From at \p To, rewriting each user's DIExpression
/// as needed so the debug value stays correct across no-op conversions and
/// integer widening/narrowing. Returns true if any debug user was changed.
bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
                                 Instruction &DomPoint, DominatorTree &DT) {
  // Exit early if From has no debug users.
  if (!From.isUsedByMetadata())
    return false;

  assert(&From != &To && "Can't replace something with itself");

  Type *FromTy = From.getType();
  Type *ToTy = To.getType();

  // Rewrite callback that keeps each debug user's expression unchanged.
  auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
    return DII.getExpression();
  };

  // Handle no-op conversions.
  Module &M = *From.getModule();
  const DataLayout &DL = M.getDataLayout();
  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
    return rewriteDebugUsers(From, To, DomPoint, DT, Identity);

  // Handle integer-to-integer widening and narrowing.
  // FIXME: Use DW_OP_convert when it's available everywhere.
  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
    uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
    uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
    assert(FromBits != ToBits && "Unexpected no-op conversion");

    // When the width of the result grows, assume that a debugger will only
    // access the low `FromBits` bits when inspecting the source variable.
    if (FromBits < ToBits)
      return rewriteDebugUsers(From, To, DomPoint, DT, Identity);

    // The width of the result has shrunk. Use sign/zero extension to describe
    // the source variable's high bits.
    auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
      DILocalVariable *Var = DII.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
      auto Signedness = Var->getSignedness();
      if (!Signedness)
        return None; // None tells rewriteDebugUsers to skip this user.

      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
      dwarf::TypeKind TK = Signed ? dwarf::DW_ATE_signed : dwarf::DW_ATE_unsigned;
      // Convert down to the narrow width, then back up, so the debugger sees
      // the value correctly sign/zero-extended to the original width.
      SmallVector<uint64_t, 8> Ops({dwarf::DW_OP_LLVM_convert, ToBits, TK,
                                    dwarf::DW_OP_LLVM_convert, FromBits, TK});
      return DIExpression::appendToStack(DII.getExpression(), Ops);
    };
    return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
  }

  // TODO: Floating-point conversions, vectors.
  return false;
}
1895 | ||||
/// Remove every instruction in \p BB except the terminator, EH pads, and
/// token-producing instructions. Returns the number of non-debug
/// instructions that were deleted.
unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    // Token-typed values cannot be replaced with undef; their uses are kept
    // and the producer is preserved below.
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      // Keep EH pads and token producers: make this instruction the new
      // deletion boundary and continue scanning backwards from it.
      EndInst = Inst;
      continue;
    }
    // Debug intrinsics are erased but not counted as dead instructions.
    if (!isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadInst;
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}
1916 | ||||
/// Replace \p I and every instruction after it in its block with an
/// 'unreachable' terminator, optionally preceded by a call to llvm.trap.
/// Successor PHIs are updated, and DTU/MSSAU are kept in sync when provided.
/// Returns the number of instructions removed (including \p I).
unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
                                   bool PreserveLCSSA, DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  BasicBlock *BB = I->getParent();
  std::vector <DominatorTree::UpdateType> Updates;

  if (MSSAU)
    MSSAU->changeToUnreachable(I);

  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  if (DTU)
    Updates.reserve(BB->getTerminator()->getNumSuccessors());
  for (BasicBlock *Successor : successors(BB)) {
    Successor->removePredecessor(BB, PreserveLCSSA);
    if (DTU)
      Updates.push_back({DominatorTree::Delete, BB, Successor});
  }
  // Insert a call to llvm.trap right before this. This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Function *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  auto *UI = new UnreachableInst(I->getContext(), I);
  UI->setDebugLoc(I->getDebugLoc());

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    // RAUW with undef first so erasing never leaves dangling uses.
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
    ++NumInstrsRemoved;
  }
  // Apply the accumulated CFG-edge deletions in one batch.
  if (DTU)
    DTU->applyUpdatesPermissive(Updates);
  return NumInstrsRemoved;
}
1959 | ||||
1960 | /// changeToCall - Convert the specified invoke into a normal call. | |||
1961 | static void changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr) { | |||
1962 | SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end()); | |||
1963 | SmallVector<OperandBundleDef, 1> OpBundles; | |||
1964 | II->getOperandBundlesAsDefs(OpBundles); | |||
1965 | CallInst *NewCall = CallInst::Create( | |||
1966 | II->getFunctionType(), II->getCalledValue(), Args, OpBundles, "", II); | |||
1967 | NewCall->takeName(II); | |||
1968 | NewCall->setCallingConv(II->getCallingConv()); | |||
1969 | NewCall->setAttributes(II->getAttributes()); | |||
1970 | NewCall->setDebugLoc(II->getDebugLoc()); | |||
1971 | NewCall->copyMetadata(*II); | |||
1972 | II->replaceAllUsesWith(NewCall); | |||
1973 | ||||
1974 | // Follow the call by a branch to the normal destination. | |||
1975 | BasicBlock *NormalDestBB = II->getNormalDest(); | |||
1976 | BranchInst::Create(NormalDestBB, II); | |||
1977 | ||||
1978 | // Update PHI nodes in the unwind destination | |||
1979 | BasicBlock *BB = II->getParent(); | |||
1980 | BasicBlock *UnwindDestBB = II->getUnwindDest(); | |||
1981 | UnwindDestBB->removePredecessor(BB); | |||
1982 | II->eraseFromParent(); | |||
1983 | if (DTU) | |||
1984 | DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, UnwindDestBB}}); | |||
1985 | } | |||
1986 | ||||
/// Convert the call \p CI into an invoke whose unwind destination is
/// \p UnwindEdge, splitting CI's block so the invoke can be its terminator.
/// Returns the new block holding the instructions that followed the call.
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split =
      BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by splitBasicBlock
  BB->getInstList().pop_back();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II =
      InvokeInst::Create(CI->getFunctionType(), CI->getCalledValue(), Split,
                         UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call
  Split->getInstList().pop_front();
  return Split;
}
2024 | ||||
/// Walk the CFG from the entry block, inserting every reachable block into
/// \p Reachable. Along the way, canonicalize obviously-dead code: calls to
/// null/undef or noreturn functions, stores to null/undef, known-false
/// assumes and guards, invokes of nounwind callees, duplicate catchswitch
/// handlers, and constant-foldable terminators. Returns true if any IR was
/// changed; \p DTU, if given, is kept in sync with CFG edits.
static bool markAliveBlocks(Function &F,
                            SmallPtrSetImpl<BasicBlock *> &Reachable,
                            DomTreeUpdater *DTU = nullptr) {
  SmallVector<BasicBlock*, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (Instruction &I : *BB) {
      if (auto *CI = dyn_cast<CallInst>(&I)) {
        Value *Callee = CI->getCalledValue();
        // Handle intrinsic calls.
        if (Function *F = dyn_cast<Function>(Callee)) {
          auto IntrinsicID = F->getIntrinsicID();
          // Assumptions that are known to be false are equivalent to
          // unreachable. Also, if the condition is undefined, then we make the
          // choice most beneficial to the optimizer, and choose that to also be
          // unreachable.
          if (IntrinsicID == Intrinsic::assume) {
            if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
              // Don't insert a call to llvm.trap right before the unreachable.
              changeToUnreachable(CI, false, false, DTU);
              Changed = true;
              break; // Iterators into BB are now invalid; rescan via worklist.
            }
          } else if (IntrinsicID == Intrinsic::experimental_guard) {
            // A call to the guard intrinsic bails out of the current
            // compilation unit if the predicate passed to it is false. If the
            // predicate is a constant false, then we know the guard will bail
            // out of the current compile unconditionally, so all code following
            // it is dead.
            //
            // Note: unlike in llvm.assume, it is not "obviously profitable" for
            // guards to treat `undef` as `false` since a guard on `undef` can
            // still be useful for widening.
            if (match(CI->getArgOperand(0), m_Zero()))
              if (!isa<UnreachableInst>(CI->getNextNode())) {
                changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
                                    false, DTU);
                Changed = true;
                break;
              }
          }
        } else if ((isa<ConstantPointerNull>(Callee) &&
                    !NullPointerIsDefined(CI->getFunction())) ||
                   isa<UndefValue>(Callee)) {
          // Calling null (where null is not a defined address) or undef is UB.
          changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn() && !CI->isMustTailCall()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(CI->getNextNode())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(CI->getNextNode(), false, false, DTU);
            Changed = true;
          }
          break;
        }
      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
        // Store to undef and store to null are undefined and used to signal
        // that they should be changed to unreachable by passes that can't
        // modify the CFG.

        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             !NullPointerIsDefined(SI->getFunction(),
                                   SI->getPointerAddressSpace()))) {
          changeToUnreachable(SI, true, false, DTU);
          Changed = true;
          break;
        }
      }
    }

    Instruction *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledValue();
      if ((isa<ConstantPointerNull>(Callee) &&
           !NullPointerIsDefined(BB->getParent())) ||
          isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true, false, DTU);
        Changed = true;
      } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // The invoke's result is unused and it has no side effects, so the
          // call can be dropped entirely: jump to the normal destination
          // branch.
          BasicBlock *NormalDestBB = II->getNormalDest();
          BasicBlock *UnwindDestBB = II->getUnwindDest();
          BranchInst::Create(NormalDestBB, II);
          UnwindDestBB->removePredecessor(II->getParent());
          II->eraseFromParent();
          if (DTU)
            DTU->applyUpdatesPermissive(
                {{DominatorTree::Delete, BB, UnwindDestBB}});
        } else
          changeToCall(II, DTU);
        Changed = true;
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpads which cannot be reached.
      // DenseMap traits keying catchpads by operand identity so structurally
      // identical handlers hash/compare equal.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(RHS);
        }
      };

      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          // A handler identical to an earlier one can never be selected.
          CatchSwitch->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
    }

    Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
    // Enqueue each successor exactly once.
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
  } while (!Worklist.empty());
  return Changed;
}
2187 | ||||
/// Remove the unwind edge leaving \p BB by rewriting its terminator into an
/// equivalent form with no unwind successor. PHIs in the old unwind
/// destination are updated, and the deleted edge is reported to \p DTU when
/// provided.
void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
  Instruction *TI = BB->getTerminator();

  // An invoke's unwind edge is removed by demoting it to a plain call.
  if (auto *II = dyn_cast<InvokeInst>(TI)) {
    changeToCall(II, DTU);
    return;
  }

  Instruction *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    // Rebuild the cleanupret with an 'unwind to caller' destination.
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    // Rebuild the catchswitch without an unwind destination, preserving
    // every handler.
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch);
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
  TI->eraseFromParent();
  if (DTU)
    DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, UnwindDest}});
}
2223 | ||||
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
/// after modifying the CFG.
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI,
                                   DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  SmallPtrSet<BasicBlock*, 16> Reachable;
  bool Changed = markAliveBlocks(F, Reachable, DTU);

  // If there are unreachable blocks in the CFG...
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumRemoved += F.size()-Reachable.size();

  // Collect the dead blocks. The entry block is always reachable, so start
  // scanning at the second block.
  SmallPtrSet<BasicBlock *, 16> DeadBlockSet;
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ++I) {
    auto *BB = &*I;
    if (Reachable.count(BB))
      continue;
    DeadBlockSet.insert(BB);
  }

  if (MSSAU)
    MSSAU->removeBlocks(DeadBlockSet);

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references. Update DTU and LVI if available.
  std::vector<DominatorTree::UpdateType> Updates;
  for (auto *BB : DeadBlockSet) {
    for (BasicBlock *Successor : successors(BB)) {
      // Only live successors keep PHI entries worth removing; edges between
      // two dead blocks die with the blocks themselves.
      if (!DeadBlockSet.count(Successor))
        Successor->removePredecessor(BB);
      if (DTU)
        Updates.push_back({DominatorTree::Delete, BB, Successor});
    }
    if (LVI)
      LVI->eraseBlock(BB);
    BB->dropAllReferences();
  }
  // Second pass: either erase the dead blocks outright, or (with a DTU)
  // strip their terminators so deletion can be deferred to the updater.
  for (Function::iterator I = ++F.begin(); I != F.end();) {
    auto *BB = &*I;
    if (Reachable.count(BB)) {
      ++I;
      continue;
    }
    if (DTU) {
      // Remove the terminator of BB to clear the successor list of BB.
      if (BB->getTerminator())
        BB->getInstList().pop_back();
      new UnreachableInst(BB->getContext(), BB);
      assert(succ_empty(BB) && "The successor list of BB isn't empty before "
                               "applying corresponding DTU updates.");
      ++I;
    } else {
      I = F.getBasicBlockList().erase(I);
    }
  }

  if (DTU) {
    DTU->applyUpdatesPermissive(Updates);
    bool Deleted = false;
    for (auto *BB : DeadBlockSet) {
      // Blocks already queued for deletion were counted by an earlier call;
      // don't double-count them in the statistic.
      if (DTU->isBBPendingDeletion(BB))
        --NumRemoved;
      else
        Deleted = true;
      DTU->deleteBB(BB);
    }
    if (!Deleted)
      return false;
  }
  return true;
}
2300 | ||||
/// Merge the metadata of \p J into \p K, storing the result on \p K.
/// Kinds not listed in \p KnownIDs are dropped from K; each known kind is
/// merged with its own conservative rule. \p DoesKMove indicates that K will
/// be moved (hoisted/sunk) to J's position, which affects value-dependent
/// metadata such as !range and !nonnull.
void llvm::combineMetadata(Instruction *K, const Instruction *J,
                           ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->dropUnknownNonDebugMetadata(KnownIDs);
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind);
    MDNode *KMD = MD.second;

    switch (Kind) {
    default:
      K->setMetadata(Kind, nullptr); // Remove unknown metadata
      break;
    case LLVMContext::MD_dbg:
      llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
    case LLVMContext::MD_tbaa:
      K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
      break;
    case LLVMContext::MD_alias_scope:
      K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
      break;
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_mem_parallel_loop_access:
      // Keep only the nodes both instructions agree on.
      K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
      break;
    case LLVMContext::MD_access_group:
      K->setMetadata(LLVMContext::MD_access_group,
                     intersectAccessGroups(K, J));
      break;
    case LLVMContext::MD_range:

      // If K does move, use most generic range. Otherwise keep the range of
      // K.
      if (DoesKMove)
        // FIXME: If K does move, we should drop the range info and nonnull.
        // Currently this function is used with DoesKMove in passes
        // doing hoisting/sinking and the current behavior of using the
        // most generic range is correct in those cases.
        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
      break;
    case LLVMContext::MD_fpmath:
      K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
      break;
    case LLVMContext::MD_invariant_load:
      // Only set the !invariant.load if it is present in both instructions.
      K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_nonnull:
      // If K does move, keep nonnull if it is present in both instructions.
      if (DoesKMove)
        K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_invariant_group:
      // Preserve !invariant.group in K.
      break;
    case LLVMContext::MD_align:
      K->setMetadata(Kind,
        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      K->setMetadata(Kind,
        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is load or store - f.e. combining bitcast with load
  // could produce bitcast with invariant.group metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}
2378 | ||||
2379 | void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J, | |||
2380 | bool KDominatesJ) { | |||
2381 | unsigned KnownIDs[] = { | |||
2382 | LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, | |||
2383 | LLVMContext::MD_noalias, LLVMContext::MD_range, | |||
2384 | LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull, | |||
2385 | LLVMContext::MD_invariant_group, LLVMContext::MD_align, | |||
2386 | LLVMContext::MD_dereferenceable, | |||
2387 | LLVMContext::MD_dereferenceable_or_null, | |||
2388 | LLVMContext::MD_access_group}; | |||
2389 | combineMetadata(K, J, KnownIDs, KDominatesJ); | |||
2390 | } | |||
2391 | ||||
2392 | void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) { | |||
2393 | auto *ReplInst = dyn_cast<Instruction>(Repl); | |||
2394 | if (!ReplInst) | |||
2395 | return; | |||
2396 | ||||
2397 | // Patch the replacement so that it is not more restrictive than the value | |||
2398 | // being replaced. | |||
2399 | // Note that if 'I' is a load being replaced by some operation, | |||
2400 | // for example, by an arithmetic operation, then andIRFlags() | |||
2401 | // would just erase all math flags from the original arithmetic | |||
2402 | // operation, which is clearly not wanted and not needed. | |||
2403 | if (!isa<LoadInst>(I)) | |||
2404 | ReplInst->andIRFlags(I); | |||
2405 | ||||
2406 | // FIXME: If both the original and replacement value are part of the | |||
2407 | // same control-flow region (meaning that the execution of one | |||
2408 | // guarantees the execution of the other), then we can combine the | |||
2409 | // noalias scopes here and do better than the general conservative | |||
2410 | // answer used in combineMetadata(). | |||
2411 | ||||
2412 | // In general, GVN unifies expressions over different control-flow | |||
2413 | // regions, and so we need a conservative combination of the noalias | |||
2414 | // scopes. | |||
2415 | static const unsigned KnownIDs[] = { | |||
2416 | LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, | |||
2417 | LLVMContext::MD_noalias, LLVMContext::MD_range, | |||
2418 | LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, | |||
2419 | LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull, | |||
2420 | LLVMContext::MD_access_group}; | |||
2421 | combineMetadata(ReplInst, I, KnownIDs, false); | |||
2422 | } | |||
2423 | ||||
/// Replace every use of \p From that satisfies \p Dominates(Root, Use) with
/// \p To, and return the number of uses rewritten. \p Root is a
/// BasicBlockEdge or BasicBlock; \p Dominates encapsulates the dominance
/// query against it (see the two public overloads below).
template <typename RootType, typename DominatesFn>
static unsigned replaceDominatedUsesWith(Value *From, Value *To,
                                         const RootType &Root,
                                         const DominatesFn &Dominates) {
  assert(From->getType() == To->getType())((From->getType() == To->getType()) ? static_cast<void > (0) : __assert_fail ("From->getType() == To->getType()" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 2428, __PRETTY_FUNCTION__));

  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    // Advance the iterator before mutating the use: U.set() unlinks U from
    // From's use list, which would otherwise invalidate UI.
    Use &U = *UI++;
    if (!Dominates(Root, U))
      continue;
    U.set(To);
    LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Replace dominated use of '" << From->getName() << "' as " << *To << " in " << *U << "\n"; } } while (false)
                      << "' as " << *To << " in " << *U << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Replace dominated use of '" << From->getName() << "' as " << *To << " in " << *U << "\n"; } } while (false);
    ++Count;
  }
  return Count;
}
2443 | ||||
/// Replace all uses of \p From that live outside of From's defining basic
/// block with \p To. Returns the number of uses replaced.
unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
  assert(From->getType() == To->getType())((From->getType() == To->getType()) ? static_cast<void > (0) : __assert_fail ("From->getType() == To->getType()" , "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/Utils/Local.cpp" , 2445, __PRETTY_FUNCTION__));
  auto *BB = From->getParent();
  unsigned Count = 0;

  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    // Advance before U.set() unlinks this use from From's use list.
    Use &U = *UI++;
    auto *I = cast<Instruction>(U.getUser());
    // Uses inside the defining block are intentionally left alone.
    if (I->getParent() == BB)
      continue;
    U.set(To);
    ++Count;
  }
  return Count;
}
2460 | ||||
2461 | unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, | |||
2462 | DominatorTree &DT, | |||
2463 | const BasicBlockEdge &Root) { | |||
2464 | auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { | |||
2465 | return DT.dominates(Root, U); | |||
2466 | }; | |||
2467 | return ::replaceDominatedUsesWith(From, To, Root, Dominates); | |||
2468 | } | |||
2469 | ||||
2470 | unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, | |||
2471 | DominatorTree &DT, | |||
2472 | const BasicBlock *BB) { | |||
2473 | auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) { | |||
2474 | auto *I = cast<Instruction>(U.getUser())->getParent(); | |||
2475 | return DT.properlyDominates(BB, I); | |||
2476 | }; | |||
2477 | return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates); | |||
2478 | } | |||
2479 | ||||
2480 | bool llvm::callsGCLeafFunction(const CallBase *Call, | |||
2481 | const TargetLibraryInfo &TLI) { | |||
2482 | // Check if the function is specifically marked as a gc leaf function. | |||
2483 | if (Call->hasFnAttr("gc-leaf-function")) | |||
2484 | return true; | |||
2485 | if (const Function *F = Call->getCalledFunction()) { | |||
2486 | if (F->hasFnAttribute("gc-leaf-function")) | |||
2487 | return true; | |||
2488 | ||||
2489 | if (auto IID = F->getIntrinsicID()) | |||
2490 | // Most LLVM intrinsics do not take safepoints. | |||
2491 | return IID != Intrinsic::experimental_gc_statepoint && | |||
2492 | IID != Intrinsic::experimental_deoptimize; | |||
2493 | } | |||
2494 | ||||
2495 | // Lib calls can be materialized by some passes, and won't be | |||
2496 | // marked as 'gc-leaf-function.' All available Libcalls are | |||
2497 | // GC-leaf. | |||
2498 | LibFunc LF; | |||
2499 | if (TLI.getLibFunc(ImmutableCallSite(Call), LF)) { | |||
2500 | return TLI.has(LF); | |||
2501 | } | |||
2502 | ||||
2503 | return false; | |||
2504 | } | |||
2505 | ||||
2506 | void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, | |||
2507 | LoadInst &NewLI) { | |||
2508 | auto *NewTy = NewLI.getType(); | |||
2509 | ||||
2510 | // This only directly applies if the new type is also a pointer. | |||
2511 | if (NewTy->isPointerTy()) { | |||
2512 | NewLI.setMetadata(LLVMContext::MD_nonnull, N); | |||
2513 | return; | |||
2514 | } | |||
2515 | ||||
2516 | // The only other translation we can do is to integral loads with !range | |||
2517 | // metadata. | |||
2518 | if (!NewTy->isIntegerTy()) | |||
2519 | return; | |||
2520 | ||||
2521 | MDBuilder MDB(NewLI.getContext()); | |||
2522 | const Value *Ptr = OldLI.getPointerOperand(); | |||
2523 | auto *ITy = cast<IntegerType>(NewTy); | |||
2524 | auto *NullInt = ConstantExpr::getPtrToInt( | |||
2525 | ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy); | |||
2526 | auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1)); | |||
2527 | NewLI.setMetadata(LLVMContext::MD_range, | |||
2528 | MDB.createRange(NonNullInt, NullInt)); | |||
2529 | } | |||
2530 | ||||
2531 | void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, | |||
2532 | MDNode *N, LoadInst &NewLI) { | |||
2533 | auto *NewTy = NewLI.getType(); | |||
2534 | ||||
2535 | // Give up unless it is converted to a pointer where there is a single very | |||
2536 | // valuable mapping we can do reliably. | |||
2537 | // FIXME: It would be nice to propagate this in more ways, but the type | |||
2538 | // conversions make it hard. | |||
2539 | if (!NewTy->isPointerTy()) | |||
2540 | return; | |||
2541 | ||||
2542 | unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy); | |||
2543 | if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) { | |||
2544 | MDNode *NN = MDNode::get(OldLI.getContext(), None); | |||
2545 | NewLI.setMetadata(LLVMContext::MD_nonnull, NN); | |||
2546 | } | |||
2547 | } | |||
2548 | ||||
2549 | void llvm::dropDebugUsers(Instruction &I) { | |||
2550 | SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; | |||
2551 | findDbgUsers(DbgUsers, &I); | |||
2552 | for (auto *DII : DbgUsers) | |||
2553 | DII->eraseFromParent(); | |||
2554 | } | |||
2555 | ||||
2556 | void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, | |||
2557 | BasicBlock *BB) { | |||
2558 | // Since we are moving the instructions out of its basic block, we do not | |||
2559 | // retain their original debug locations (DILocations) and debug intrinsic | |||
2560 | // instructions. | |||
2561 | // | |||
2562 | // Doing so would degrade the debugging experience and adversely affect the | |||
2563 | // accuracy of profiling information. | |||
2564 | // | |||
2565 | // Currently, when hoisting the instructions, we take the following actions: | |||
2566 | // - Remove their debug intrinsic instructions. | |||
2567 | // - Set their debug locations to the values from the insertion point. | |||
2568 | // | |||
2569 | // As per PR39141 (comment #8), the more fundamental reason why the dbg.values | |||
2570 | // need to be deleted, is because there will not be any instructions with a | |||
2571 | // DILocation in either branch left after performing the transformation. We | |||
2572 | // can only insert a dbg.value after the two branches are joined again. | |||
2573 | // | |||
2574 | // See PR38762, PR39243 for more details. | |||
2575 | // | |||
2576 | // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to | |||
2577 | // encode predicated DIExpressions that yield different results on different | |||
2578 | // code paths. | |||
2579 | for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) { | |||
2580 | Instruction *I = &*II; | |||
2581 | I->dropUnknownNonDebugMetadata(); | |||
2582 | if (I->isUsedByMetadata()) | |||
2583 | dropDebugUsers(*I); | |||
2584 | if (isa<DbgInfoIntrinsic>(I)) { | |||
2585 | // Remove DbgInfo Intrinsics. | |||
2586 | II = I->eraseFromParent(); | |||
2587 | continue; | |||
2588 | } | |||
2589 | I->setDebugLoc(InsertPt->getDebugLoc()); | |||
2590 | ++II; | |||
2591 | } | |||
2592 | DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(), | |||
2593 | BB->begin(), | |||
2594 | BB->getTerminator()->getIterator()); | |||
2595 | } | |||
2596 | ||||
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  // Size the provenance map to one entry per bit of the provider; callers
  // fill in the bit-to-bit mapping afterwards.
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  /// Sentinel for a bit with no (or a cleared) provenance.
  enum { Unset = -1 };
};

} // end anonymous namespace
2617 | ||||
2618 | /// Analyze the specified subexpression and see if it is capable of providing | |||
2619 | /// pieces of a bswap or bitreverse. The subexpression provides a potential | |||
2620 | /// piece of a bswap or bitreverse if it can be proven that each non-zero bit in | |||
2621 | /// the output of the expression came from a corresponding bit in some other | |||
2622 | /// value. This function is recursive, and the end result is a mapping of | |||
2623 | /// bitnumber to bitnumber. It is the caller's responsibility to validate that | |||
2624 | /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse. | |||
2625 | /// | |||
2626 | /// For example, if the current subexpression if "(shl i32 %X, 24)" then we know | |||
2627 | /// that the expression deposits the low byte of %X into the high byte of the | |||
2628 | /// result and that all other bits are zero. This expression is accepted and a | |||
2629 | /// BitPart is returned with Provider set to %X and Provenance[24-31] set to | |||
2630 | /// [0-7]. | |||
2631 | /// | |||
2632 | /// To avoid revisiting values, the BitPart results are memoized into the | |||
2633 | /// provided map. To avoid unnecessary copying of BitParts, BitParts are | |||
2634 | /// constructed in-place in the \c BPS map. Because of this \c BPS needs to | |||
2635 | /// store BitParts objects, not pointers. As we need the concept of a nullptr | |||
2636 | /// BitParts (Value has been analyzed and the analysis failed), we an Optional | |||
2637 | /// type instead to provide the same functionality. | |||
2638 | /// | |||
2639 | /// Because we pass around references into \c BPS, we must use a container that | |||
2640 | /// does not invalidate internal references (std::map instead of DenseMap). | |||
2641 | static const Optional<BitPart> & | |||
2642 | collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, | |||
2643 | std::map<Value *, Optional<BitPart>> &BPS, int Depth) { | |||
2644 | auto I = BPS.find(V); | |||
2645 | if (I != BPS.end()) | |||
2646 | return I->second; | |||
2647 | ||||
2648 | auto &Result = BPS[V] = None; | |||
2649 | auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth(); | |||
2650 | ||||
2651 | // Prevent stack overflow by limiting the recursion depth | |||
2652 | if (Depth == BitPartRecursionMaxDepth) { | |||
2653 | LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "collectBitParts max recursion depth reached.\n" ; } } while (false); | |||
2654 | return Result; | |||
2655 | } | |||
2656 | ||||
2657 | if (Instruction *I = dyn_cast<Instruction>(V)) { | |||
2658 | // If this is an or instruction, it may be an inner node of the bswap. | |||
2659 | if (I->getOpcode() == Instruction::Or) { | |||
2660 | auto &A = collectBitParts(I->getOperand(0), MatchBSwaps, | |||
2661 | MatchBitReversals, BPS, Depth + 1); | |||
2662 | auto &B = collectBitParts(I->getOperand(1), MatchBSwaps, | |||
2663 | MatchBitReversals, BPS, Depth + 1); | |||
2664 | if (!A || !B) | |||
2665 | return Result; | |||
2666 | ||||
2667 | // Try and merge the two together. | |||
2668 | if (!A->Provider || A->Provider != B->Provider) | |||
2669 | return Result; | |||
2670 | ||||
2671 | Result = BitPart(A->Provider, BitWidth); | |||
2672 | for (unsigned i = 0; i < A->Provenance.size(); ++i) { | |||
2673 | if (A->Provenance[i] != BitPart::Unset && | |||
2674 | B->Provenance[i] != BitPart::Unset && | |||
2675 | A->Provenance[i] != B->Provenance[i]) | |||
2676 | return Result = None; | |||
2677 | ||||
2678 | if (A->Provenance[i] == BitPart::Unset) | |||
2679 | Result->Provenance[i] = B->Provenance[i]; | |||
2680 | else | |||
2681 | Result->Provenance[i] = A->Provenance[i]; | |||
2682 | } | |||
2683 | ||||
2684 | return Result; | |||
2685 | } | |||
2686 | ||||
2687 | // If this is a logical shift by a constant, recurse then shift the result. | |||
2688 | if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) { | |||
2689 | unsigned BitShift = | |||
2690 | cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U); | |||
2691 | // Ensure the shift amount is defined. | |||
2692 | if (BitShift > BitWidth) | |||
2693 | return Result; | |||
2694 | ||||
2695 | auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, | |||
2696 | MatchBitReversals, BPS, Depth + 1); | |||
2697 | if (!Res) | |||
2698 | return Result; | |||
2699 | Result = Res; | |||
2700 | ||||
2701 | // Perform the "shift" on BitProvenance. | |||
2702 | auto &P = Result->Provenance; | |||
2703 | if (I->getOpcode() == Instruction::Shl) { | |||
2704 | P.erase(std::prev(P.end(), BitShift), P.end()); | |||
2705 | P.insert(P.begin(), BitShift, BitPart::Unset); | |||
2706 | } else { | |||
2707 | P.erase(P.begin(), std::next(P.begin(), BitShift)); | |||
2708 | P.insert(P.end(), BitShift, BitPart::Unset); | |||
2709 | } | |||
2710 | ||||
2711 | return Result; | |||
2712 | } | |||
2713 | ||||
2714 | // If this is a logical 'and' with a mask that clears bits, recurse then | |||
2715 | // unset the appropriate bits. | |||
2716 | if (I->getOpcode() == Instruction::And && | |||
2717 | isa<ConstantInt>(I->getOperand(1))) { | |||
2718 | APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1); | |||
2719 | const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue(); | |||
2720 | ||||
2721 | // Check that the mask allows a multiple of 8 bits for a bswap, for an | |||
2722 | // early exit. | |||
2723 | unsigned NumMaskedBits = AndMask.countPopulation(); | |||
2724 | if (!MatchBitReversals && NumMaskedBits % 8 != 0) | |||
2725 | return Result; | |||
2726 | ||||
2727 | auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, | |||
2728 | MatchBitReversals, BPS, Depth + 1); | |||
2729 | if (!Res) | |||
2730 | return Result; | |||
2731 | Result = Res; | |||
2732 | ||||
2733 | for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1) | |||
2734 | // If the AndMask is zero for this bit, clear the bit. | |||
2735 | if ((AndMask & Bit) == 0) | |||
2736 | Result->Provenance[i] = BitPart::Unset; | |||
2737 | return Result; | |||
2738 | } | |||
2739 | ||||
2740 | // If this is a zext instruction zero extend the result. | |||
2741 | if (I->getOpcode() == Instruction::ZExt) { | |||
2742 | auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, | |||
2743 | MatchBitReversals, BPS, Depth + 1); | |||
2744 | if (!Res) | |||
2745 | return Result; | |||
2746 | ||||
2747 | Result = BitPart(Res->Provider, BitWidth); | |||
2748 | auto NarrowBitWidth = | |||
2749 | cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth(); | |||
2750 | for (unsigned i = 0; i < NarrowBitWidth; ++i) | |||
2751 | Result->Provenance[i] = Res->Provenance[i]; | |||
2752 | for (unsigned i = NarrowBitWidth; i < BitWidth; ++i) | |||
2753 | Result->Provenance[i] = BitPart::Unset; | |||
2754 | return Result; | |||
2755 | } | |||
2756 | } | |||
2757 | ||||
2758 | // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be | |||
2759 | // the input value to the bswap/bitreverse. | |||
2760 | Result = BitPart(V, BitWidth); | |||
2761 | for (unsigned i = 0; i < BitWidth; ++i) | |||
2762 | Result->Provenance[i] = i; | |||
2763 | return Result; | |||
2764 | } | |||
2765 | ||||
/// Return true if moving bit \p From to bit \p To in a \p BitWidth-bit value
/// is consistent with a byte swap: the position within a byte is unchanged
/// while the byte itself moves to the mirrored byte position.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // Within its byte the bit must keep its offset.
  if (From % 8 != To % 8)
    return false;
  // Compare byte indices: the source byte must be the mirror of the
  // destination byte.
  const unsigned FromByte = From / 8;
  const unsigned ToByte = To / 8;
  const unsigned NumBytes = BitWidth / 8;
  return FromByte == NumBytes - ToByte - 1;
}
2776 | ||||
/// Return true if moving bit \p From to bit \p To in a \p BitWidth-bit value
/// is consistent with a full bit reversal (bit i maps to bit BitWidth-1-i).
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // Equivalent to From == BitWidth - To - 1 under modular arithmetic.
  return To == BitWidth - From - 1;
}
2781 | ||||
2782 | bool llvm::recognizeBSwapOrBitReverseIdiom( | |||
2783 | Instruction *I, bool MatchBSwaps, bool MatchBitReversals, | |||
2784 | SmallVectorImpl<Instruction *> &InsertedInsts) { | |||
2785 | if (Operator::getOpcode(I) != Instruction::Or) | |||
2786 | return false; | |||
2787 | if (!MatchBSwaps && !MatchBitReversals) | |||
2788 | return false; | |||
2789 | IntegerType *ITy = dyn_cast<IntegerType>(I->getType()); | |||
2790 | if (!ITy || ITy->getBitWidth() > 128) | |||
2791 | return false; // Can't do vectors or integers > 128 bits. | |||
2792 | unsigned BW = ITy->getBitWidth(); | |||
2793 | ||||
2794 | unsigned DemandedBW = BW; | |||
2795 | IntegerType *DemandedTy = ITy; | |||
2796 | if (I->hasOneUse()) { | |||
2797 | if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) { | |||
2798 | DemandedTy = cast<IntegerType>(Trunc->getType()); | |||
2799 | DemandedBW = DemandedTy->getBitWidth(); | |||
2800 | } | |||
2801 | } | |||
2802 | ||||
2803 | // Try to find all the pieces corresponding to the bswap. | |||
2804 | std::map<Value *, Optional<BitPart>> BPS; | |||
2805 | auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0); | |||
2806 | if (!Res) | |||
2807 | return false; | |||
2808 | auto &BitProvenance = Res->Provenance; | |||
2809 | ||||
2810 | // Now, is the bit permutation correct for a bswap or a bitreverse? We can | |||
2811 | // only byteswap values with an even number of bytes. | |||
2812 | bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true; | |||
2813 | for (unsigned i = 0; i < DemandedBW; ++i) { | |||
2814 | OKForBSwap &= | |||
2815 | bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW); | |||
2816 | OKForBitReverse &= | |||
2817 | bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW); | |||
2818 | } | |||
2819 | ||||
2820 | Intrinsic::ID Intrin; | |||
2821 | if (OKForBSwap && MatchBSwaps) | |||
2822 | Intrin = Intrinsic::bswap; | |||
2823 | else if (OKForBitReverse && MatchBitReversals) | |||
2824 | Intrin = Intrinsic::bitreverse; | |||
2825 | else | |||
2826 | return false; | |||
2827 | ||||
2828 | if (ITy != DemandedTy) { | |||
2829 | Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy); | |||
2830 | Value *Provider = Res->Provider; | |||
2831 | IntegerType *ProviderTy = cast<IntegerType>(Provider->getType()); | |||
2832 | // We may need to truncate the provider. | |||
2833 | if (DemandedTy != ProviderTy) { | |||
2834 | auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy, | |||
2835 | "trunc", I); | |||
2836 | InsertedInsts.push_back(Trunc); | |||
2837 | Provider = Trunc; | |||
2838 | } | |||
2839 | auto *CI = CallInst::Create(F, Provider, "rev", I); | |||
2840 | InsertedInsts.push_back(CI); | |||
2841 | auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I); | |||
2842 | InsertedInsts.push_back(ExtInst); | |||
2843 | return true; | |||
2844 | } | |||
2845 | ||||
2846 | Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy); | |||
2847 | InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I)); | |||
2848 | return true; | |||
2849 | } | |||
2850 | ||||
2851 | // CodeGen has special handling for some string functions that may replace | |||
2852 | // them with target-specific intrinsics. Since that'd skip our interceptors | |||
2853 | // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses, | |||
2854 | // we mark affected calls as NoBuiltin, which will disable optimization | |||
2855 | // in CodeGen. | |||
2856 | void llvm::maybeMarkSanitizerLibraryCallNoBuiltin( | |||
2857 | CallInst *CI, const TargetLibraryInfo *TLI) { | |||
2858 | Function *F = CI->getCalledFunction(); | |||
2859 | LibFunc Func; | |||
2860 | if (F && !F->hasLocalLinkage() && F->hasName() && | |||
2861 | TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) && | |||
2862 | !F->doesNotAccessMemory()) | |||
2863 | CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin); | |||
2864 | } | |||
2865 | ||||
2866 | bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) { | |||
2867 | // We can't have a PHI with a metadata type. | |||
2868 | if (I->getOperand(OpIdx)->getType()->isMetadataTy()) | |||
2869 | return false; | |||
2870 | ||||
2871 | // Early exit. | |||
2872 | if (!isa<Constant>(I->getOperand(OpIdx))) | |||
2873 | return true; | |||
2874 | ||||
2875 | switch (I->getOpcode()) { | |||
2876 | default: | |||
2877 | return true; | |||
2878 | case Instruction::Call: | |||
2879 | case Instruction::Invoke: | |||
2880 | // Can't handle inline asm. Skip it. | |||
2881 | if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue())) | |||
2882 | return false; | |||
2883 | // Many arithmetic intrinsics have no issue taking a | |||
2884 | // variable, however it's hard to distingish these from | |||
2885 | // specials such as @llvm.frameaddress that require a constant. | |||
2886 | if (isa<IntrinsicInst>(I)) | |||
2887 | return false; | |||
2888 | ||||
2889 | // Constant bundle operands may need to retain their constant-ness for | |||
2890 | // correctness. | |||
2891 | if (ImmutableCallSite(I).isBundleOperand(OpIdx)) | |||
2892 | return false; | |||
2893 | return true; | |||
2894 | case Instruction::ShuffleVector: | |||
2895 | // Shufflevector masks are constant. | |||
2896 | return OpIdx != 2; | |||
2897 | case Instruction::Switch: | |||
2898 | case Instruction::ExtractValue: | |||
2899 | // All operands apart from the first are constant. | |||
2900 | return OpIdx == 0; | |||
2901 | case Instruction::InsertValue: | |||
2902 | // All operands apart from the first and the second are constant. | |||
2903 | return OpIdx < 2; | |||
2904 | case Instruction::Alloca: | |||
2905 | // Static allocas (constant size in the entry block) are handled by | |||
2906 | // prologue/epilogue insertion so they're free anyway. We definitely don't | |||
2907 | // want to make them non-constant. | |||
2908 | return !cast<AllocaInst>(I)->isStaticAlloca(); | |||
2909 | case Instruction::GetElementPtr: | |||
2910 | if (OpIdx == 0) | |||
2911 | return true; | |||
2912 | gep_type_iterator It = gep_type_begin(I); | |||
2913 | for (auto E = std::next(It, OpIdx); It != E; ++It) | |||
2914 | if (It.isStruct()) | |||
2915 | return false; | |||
2916 | return true; | |||
2917 | } | |||
2918 | } | |||
2919 | ||||
using AllocaForValueMapTy = DenseMap<Value *, AllocaInst *>;
/// Walk backwards through casts, PHIs and GEPs to find the single AllocaInst
/// that \p V is (transitively) derived from, or nullptr if there is none or
/// the answer is ambiguous. \p AllocaForValue memoizes results across calls
/// and doubles as a cycle breaker (an in-progress value maps to nullptr).
AllocaInst *llvm::findAllocaForValue(Value *V,
                                     AllocaForValueMapTy &AllocaForValue) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
    return AI;
  // See if we've already calculated (or started to calculate) alloca for a
  // given value.
  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
  if (I != AllocaForValue.end())
    return I->second;
  // Store 0 while we're calculating alloca for value V to avoid
  // infinite recursion if the value references itself.
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0), AllocaForValue);
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (Value *IncValue : PN->incoming_values()) {
      // Allow self-referencing phi-nodes.
      if (IncValue == PN)
        continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue, AllocaForValue);
      // AI for incoming values should exist and should all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  } else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) {
    Res = findAllocaForValue(EP->getPointerOperand(), AllocaForValue);
  } else {
    LLVM_DEBUG(dbgs() << "Alloca search cancelled on unknown instruction: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Alloca search cancelled on unknown instruction: " << *V << "\n"; } } while (false)
                      << *V << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("local")) { dbgs() << "Alloca search cancelled on unknown instruction: " << *V << "\n"; } } while (false);
  }
  // Cache only successful lookups; failures keep the nullptr sentinel that
  // was stored above.
  if (Res)
    AllocaForValue[V] = Res;
  return Res;
}
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file exposes the class definitions of all of the subclasses of the |
10 | // Instruction class. This is meant to be an easy way to get access to all |
11 | // instruction subclasses. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_IR_INSTRUCTIONS_H |
16 | #define LLVM_IR_INSTRUCTIONS_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/None.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SmallVector.h" |
22 | #include "llvm/ADT/StringRef.h" |
23 | #include "llvm/ADT/Twine.h" |
24 | #include "llvm/ADT/iterator.h" |
25 | #include "llvm/ADT/iterator_range.h" |
26 | #include "llvm/IR/Attributes.h" |
27 | #include "llvm/IR/BasicBlock.h" |
28 | #include "llvm/IR/CallingConv.h" |
29 | #include "llvm/IR/Constant.h" |
30 | #include "llvm/IR/DerivedTypes.h" |
31 | #include "llvm/IR/Function.h" |
32 | #include "llvm/IR/InstrTypes.h" |
33 | #include "llvm/IR/Instruction.h" |
34 | #include "llvm/IR/OperandTraits.h" |
35 | #include "llvm/IR/Type.h" |
36 | #include "llvm/IR/Use.h" |
37 | #include "llvm/IR/User.h" |
38 | #include "llvm/IR/Value.h" |
39 | #include "llvm/Support/AtomicOrdering.h" |
40 | #include "llvm/Support/Casting.h" |
41 | #include "llvm/Support/ErrorHandling.h" |
42 | #include <cassert> |
43 | #include <cstddef> |
44 | #include <cstdint> |
45 | #include <iterator> |
46 | |
47 | namespace llvm { |
48 | |
49 | class APInt; |
50 | class ConstantInt; |
51 | class DataLayout; |
52 | class LLVMContext; |
53 | |
54 | //===----------------------------------------------------------------------===// |
55 | // AllocaInst Class |
56 | //===----------------------------------------------------------------------===// |
57 | |
58 | /// an instruction to allocate memory on the stack |
59 | class AllocaInst : public UnaryInstruction { |
60 | Type *AllocatedType; |
61 | |
62 | protected: |
63 | // Note: Instruction needs to be a friend here to call cloneImpl. |
64 | friend class Instruction; |
65 | |
66 | AllocaInst *cloneImpl() const; |
67 | |
68 | public: |
69 | explicit AllocaInst(Type *Ty, unsigned AddrSpace, |
70 | Value *ArraySize = nullptr, |
71 | const Twine &Name = "", |
72 | Instruction *InsertBefore = nullptr); |
73 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
74 | const Twine &Name, BasicBlock *InsertAtEnd); |
75 | |
76 | AllocaInst(Type *Ty, unsigned AddrSpace, |
77 | const Twine &Name, Instruction *InsertBefore = nullptr); |
78 | AllocaInst(Type *Ty, unsigned AddrSpace, |
79 | const Twine &Name, BasicBlock *InsertAtEnd); |
80 | |
81 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align, |
82 | const Twine &Name = "", Instruction *InsertBefore = nullptr); |
83 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align, |
84 | const Twine &Name, BasicBlock *InsertAtEnd); |
85 | |
86 | /// Return true if there is an allocation size parameter to the allocation |
87 | /// instruction that is not 1. |
88 | bool isArrayAllocation() const; |
89 | |
90 | /// Get the number of elements allocated. For a simple allocation of a single |
91 | /// element, this will return a constant 1 value. |
92 | const Value *getArraySize() const { return getOperand(0); } |
93 | Value *getArraySize() { return getOperand(0); } |
94 | |
95 | /// Overload to return most specific pointer type. |
96 | PointerType *getType() const { |
97 | return cast<PointerType>(Instruction::getType()); |
98 | } |
99 | |
100 | /// Get allocation size in bits. Returns None if size can't be determined, |
101 | /// e.g. in case of a VLA. |
102 | Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const; |
103 | |
104 | /// Return the type that is being allocated by the instruction. |
105 | Type *getAllocatedType() const { return AllocatedType; } |
106 | /// for use only in special circumstances that need to generically |
107 | /// transform a whole instruction (eg: IR linking and vectorization). |
108 | void setAllocatedType(Type *Ty) { AllocatedType = Ty; } |
109 | |
110 | /// Return the alignment of the memory that is being allocated by the |
111 | /// instruction. |
112 | unsigned getAlignment() const { |
113 | return (1u << (getSubclassDataFromInstruction() & 31)) >> 1; |
114 | } |
115 | void setAlignment(unsigned Align); |
116 | |
  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    // The inalloca flag lives in bit 5 (mask 32) of the subclass data; the
    // low 5 bits hold the encoded alignment (see getAlignment above).
    return getSubclassDataFromInstruction() & 32;
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    // Clear bit 5 and re-set it from V, leaving all other bits untouched.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
                               (V ? 32 : 0));
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const {
    // The swifterror flag lives in bit 6 (mask 64) of the subclass data.
    return getSubclassDataFromInstruction() & 64;
  }

  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) {
    // Clear bit 6 and re-set it from V, leaving all other bits untouched.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
                               (V ? 64 : 0));
  }
144 | |
145 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
146 | static bool classof(const Instruction *I) { |
147 | return (I->getOpcode() == Instruction::Alloca); |
148 | } |
149 | static bool classof(const Value *V) { |
150 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
151 | } |
152 | |
private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it (the flag and
  // alignment accessors above depend on the exact bit layout of this word).
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
159 | }; |
160 | |
161 | //===----------------------------------------------------------------------===// |
162 | // LoadInst Class |
163 | //===----------------------------------------------------------------------===// |
164 | |
165 | /// An instruction for reading from memory. This uses the SubclassData field in |
166 | /// Value to store whether or not the load is volatile. |
class LoadInst : public UnaryInstruction {
  // Checks operand invariants in asserts builds; defined out of line.
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  // Deprecated [opaque pointer types]
  // These convenience overloads infer the loaded type from the pointer
  // operand's pointee type, which cannot work with opaque pointers; prefer
  // the explicit-type constructors above.
  explicit LoadInst(Value *Ptr, const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 InsertAtEnd) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, InsertAtEnd) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, InsertAtEnd) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, Order, SSID, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
      : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
                 isVolatile, Align, Order, SSID, InsertAtEnd) {}

  // Subclass-data bit layout used by the accessors below: bit 0 holds the
  // volatile flag, bits 1-5 the encoded alignment (log2(align)+1), and
  // bits 7-9 the atomic ordering.

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Return the alignment of the access that is being performed.
  unsigned getAlignment() const {
    // Decode log2(alignment)+1 from bits 1-5; a stored 0 yields alignment 0
    // ("not specified").
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this load is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  /// Return the pointer operand (operand 0): the address being loaded from.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
314 | |
315 | //===----------------------------------------------------------------------===// |
316 | // StoreInst Class |
317 | //===----------------------------------------------------------------------===// |
318 | |
319 | /// An instruction for storing to memory. |
class StoreInst : public Instruction {
  // Checks operand invariants in asserts builds; defined out of line.
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, AtomicOrdering Order,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  // Subclass-data bit layout (mirrors LoadInst): bit 0 volatile flag,
  // bits 1-5 encoded alignment, bits 7-9 atomic ordering.

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Return the alignment of the access that is being performed
  unsigned getAlignment() const {
    // Decode log2(alignment)+1 from bits 1-5; a stored 0 yields alignment 0
    // ("not specified").
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this store is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  /// Return the value operand (operand 0): the value being stored.
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  /// Return the pointer operand (operand 1): the address being stored to.
  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
442 | |
// StoreInst always has exactly two operands (value and pointer), so it uses
// fixed-operand traits; the macro below then instantiates the out-of-line
// operand accessors declared inside the class.
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
448 | |
449 | //===----------------------------------------------------------------------===// |
450 | // FenceInst Class |
451 | //===----------------------------------------------------------------------===// |
452 | |
453 | /// An instruction for ordering other memory operations. |
class FenceInst : public Instruction {
  // Shared constructor helper; defined out of line.
  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t s) {
    return User::operator new(s, 0);
  }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    // The ordering is stored in the subclass-data bits above bit 0.
    return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    // Bit 0 is preserved; only the ordering bits (>= 1) are replaced.
    setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
                               ((unsigned)Ordering << 1));
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
519 | |
520 | //===----------------------------------------------------------------------===// |
521 | // AtomicCmpXchgInst Class |
522 | //===----------------------------------------------------------------------===// |
523 | |
524 | /// an instruction that atomically checks whether a |
525 | /// specified value is in a memory location, and, if it is, stores a new value |
526 | /// there. Returns the value that was loaded. |
527 | /// |
class AtomicCmpXchgInst : public Instruction {
  // Shared constructor helper; defined out of line.
  void Init(Value *Ptr, Value *Cmp, Value *NewVal,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering,
                    SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering,
                    SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t s) {
    return User::operator new(s, 3);
  }

  // Subclass-data bit layout used by the accessors below: bit 0 volatile
  // flag, bits 2-4 success ordering, bits 5-7 failure ordering, bit 8 weak
  // flag.

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const {
    return getSubclassDataFromInstruction() & 1;
  }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (unsigned)V);
  }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const {
    return getSubclassDataFromInstruction() & 0x100;
  }

  /// Mark this cmpxchg as weak (allowed to fail spuriously) or strong.
  void setWeak(bool IsWeak) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
                               (IsWeak << 8));
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "CmpXchg instructions can only be atomic.");
    // Mask 0x1c clears bits 2-4 before inserting the new ordering.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
                               ((unsigned)Ordering << 2));
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "CmpXchg instructions can only be atomic.");
    // Mask 0xe0 clears bits 5-7 before inserting the new ordering.
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
                               ((unsigned)Ordering << 5));
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Return the pointer operand (operand 0): the memory address operated on.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Return the compare operand (operand 1): the expected current value.
  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  /// Return the new-value operand (operand 2): stored when the compare
  /// succeeds.
  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};
675 | |
// AtomicCmpXchgInst always has exactly three operands (pointer, compare
// value, new value); the macro below instantiates the out-of-line operand
// accessors declared inside the class.
template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
682 | |
683 | //===----------------------------------------------------------------------===// |
684 | // AtomicRMWInst Class |
685 | //===----------------------------------------------------------------------===// |
686 | |
687 | /// an instruction that atomically reads a memory location, |
688 | /// combines it with another value, and then stores the result back. Returns |
689 | /// the old value. |
690 | /// |
691 | class AtomicRMWInst : public Instruction { |
692 | protected: |
693 | // Note: Instruction needs to be a friend here to call cloneImpl. |
694 | friend class Instruction; |
695 | |
696 | AtomicRMWInst *cloneImpl() const; |
697 | |
698 | public: |
699 | /// This enumeration lists the possible modifications atomicrmw can make. In |
700 | /// the descriptions, 'p' is the pointer to the instruction's memory location, |
701 | /// 'old' is the initial value of *p, and 'v' is the other value passed to the |
702 | /// instruction. These instructions always return 'old'. |
  enum BinOp {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v  (floating-point addition; see isFPOperation)
    FAdd,

    /// *p = old - v  (floating-point subtraction; see isFPOperation)
    FSub,

    // Range markers for iterating over all valid operations, plus a
    // sentinel value denoting an invalid operation.
    FIRST_BINOP = Xchg,
    LAST_BINOP = FSub,
    BAD_BINOP
  };
737 | |
738 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, |
739 | AtomicOrdering Ordering, SyncScope::ID SSID, |
740 | Instruction *InsertBefore = nullptr); |
741 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, |
742 | AtomicOrdering Ordering, SyncScope::ID SSID, |
743 | BasicBlock *InsertAtEnd); |
744 | |
  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  /// Return which BinOp this atomicrmw performs.
  BinOp getOperation() const {
    // The operation is stored in the subclass-data bits above bit 4; the low
    // five bits hold the volatile flag and the atomic ordering.
    return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
  }

  /// Return the textual name of the given operation.
  static StringRef getOperationName(BinOp Op);
755 | |
756 | static bool isFPOperation(BinOp Op) { |
757 | switch (Op) { |
758 | case AtomicRMWInst::FAdd: |
759 | case AtomicRMWInst::FSub: |
760 | return true; |
761 | default: |
762 | return false; |
763 | } |
764 | } |
765 | |
  /// Set which BinOp this atomicrmw performs.
  void setOperation(BinOp Operation) {
    // Preserve the low five flag bits and replace the operation bits (>= 5).
    unsigned short SubclassData = getSubclassDataFromInstruction();
    setInstructionSubclassData((SubclassData & 31) |
                               (Operation << 5));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const {
    // Bit 0 of the subclass data is the volatile flag.
    return getSubclassDataFromInstruction() & 1;
  }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (unsigned)V);
  }
784 | |
785 | /// Transparently provide more efficient getOperand methods. |
786 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
787 | |
  /// Returns the ordering constraint of this rmw instruction.
  /// (The ordering occupies bits 2-4 of the subclass data, hence the
  /// shift-by-2 and 3-bit mask.)
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
  }
792 | |
793 | /// Sets the ordering constraint of this rmw instruction. |
794 | void setOrdering(AtomicOrdering Ordering) { |
795 | assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 796, __PRETTY_FUNCTION__)) |
796 | "atomicrmw instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic." ) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 796, __PRETTY_FUNCTION__)); |
797 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) | |
798 | ((unsigned)Ordering << 2)); |
799 | } |
800 | |
  /// Returns the synchronization scope ID of this rmw instruction.
  /// Stored in its own member field rather than the packed subclass data
  /// (see the comment on the SSID member below).
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }
805 | |
  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }
810 | |
  // Operand 0 is the memory location being updated.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Operand 1 is the value combined with the stored value.
  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }
817 | |
  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }
822 | |
  /// Convenience wrapper: true if this instruction's operation is FAdd/FSub.
  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }
826 | |
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    // A Value is an AtomicRMWInst iff it is an Instruction with that opcode.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
834 | |
835 | private: |
836 | void Init(BinOp Operation, Value *Ptr, Value *Val, |
837 | AtomicOrdering Ordering, SyncScope::ID SSID); |
838 | |
839 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
840 | // method so that subclasses cannot accidentally use it. |
841 | void setInstructionSubclassData(unsigned short D) { |
842 | Instruction::setInstructionSubclassData(D); |
843 | } |
844 | |
845 | /// The synchronization scope ID of this rmw instruction. Not quite enough |
846 | /// room in SubClassData for everything, so synchronization scope ID gets its |
847 | /// own field. |
848 | SyncScope::ID SSID; |
849 | }; |
850 | |
// AtomicRMWInst always has exactly two operands (pointer and value), so it
// uses the fixed-operand-count traits.
template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};
855 | |
856 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst ::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits <AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*> (this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end() { return OperandTraits<AtomicRMWInst>::op_end(this); } AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const { return OperandTraits<AtomicRMWInst>::op_end(const_cast <AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand (unsigned i_nocapture) const { ((i_nocapture < OperandTraits <AtomicRMWInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 856, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<AtomicRMWInst>::op_begin(const_cast< AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<AtomicRMWInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 856, __PRETTY_FUNCTION__)); OperandTraits<AtomicRMWInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned AtomicRMWInst ::getNumOperands() const { return OperandTraits<AtomicRMWInst >::operands(this); } template <int Idx_nocapture> Use &AtomicRMWInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & AtomicRMWInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
857 | |
858 | //===----------------------------------------------------------------------===// |
859 | // GetElementPtrInst Class |
860 | //===----------------------------------------------------------------------===// |
861 | |
862 | // checkGEPType - Simple wrapper function to give a better assertion failure |
863 | // message on bad indexes for a gep instruction. |
864 | // |
865 | inline Type *checkGEPType(Type *Ty) { |
866 | assert(Ty && "Invalid GetElementPtrInst indices for type!")((Ty && "Invalid GetElementPtrInst indices for type!" ) ? static_cast<void> (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 866, __PRETTY_FUNCTION__)); |
867 | return Ty; |
868 | } |
869 | |
870 | /// an instruction for type-safe pointer arithmetic to |
871 | /// access elements of arrays and structs |
872 | /// |
873 | class GetElementPtrInst : public Instruction { |
874 | Type *SourceElementType; |
875 | Type *ResultElementType; |
876 | |
877 | GetElementPtrInst(const GetElementPtrInst &GEPI); |
878 | |
879 | /// Constructors - Create a getelementptr instruction with a base pointer an |
880 | /// list of indices. The first ctor can optionally insert before an existing |
881 | /// instruction, the second appends the new instruction to the specified |
882 | /// BasicBlock. |
883 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
884 | ArrayRef<Value *> IdxList, unsigned Values, |
885 | const Twine &NameStr, Instruction *InsertBefore); |
886 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
887 | ArrayRef<Value *> IdxList, unsigned Values, |
888 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
889 | |
890 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); |
891 | |
892 | protected: |
893 | // Note: Instruction needs to be a friend here to call cloneImpl. |
894 | friend class Instruction; |
895 | |
896 | GetElementPtrInst *cloneImpl() const; |
897 | |
898 | public: |
899 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
900 | ArrayRef<Value *> IdxList, |
901 | const Twine &NameStr = "", |
902 | Instruction *InsertBefore = nullptr) { |
903 | unsigned Values = 1 + unsigned(IdxList.size()); |
904 | if (!PointeeType) |
905 | PointeeType = |
906 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
907 | else |
908 | assert(((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 910, __PRETTY_FUNCTION__)) |
909 | PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 910, __PRETTY_FUNCTION__)) |
910 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 910, __PRETTY_FUNCTION__)); |
911 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
912 | NameStr, InsertBefore); |
913 | } |
914 | |
915 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
916 | ArrayRef<Value *> IdxList, |
917 | const Twine &NameStr, |
918 | BasicBlock *InsertAtEnd) { |
919 | unsigned Values = 1 + unsigned(IdxList.size()); |
920 | if (!PointeeType) |
921 | PointeeType = |
922 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
923 | else |
924 | assert(((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 926, __PRETTY_FUNCTION__)) |
925 | PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 926, __PRETTY_FUNCTION__)) |
926 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 926, __PRETTY_FUNCTION__)); |
927 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
928 | NameStr, InsertAtEnd); |
929 | } |
930 | |
931 | /// Create an "inbounds" getelementptr. See the documentation for the |
932 | /// "inbounds" flag in LangRef.html for details. |
933 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
934 | ArrayRef<Value *> IdxList, |
935 | const Twine &NameStr = "", |
936 | Instruction *InsertBefore = nullptr){ |
937 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); |
938 | } |
939 | |
940 | static GetElementPtrInst * |
941 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, |
942 | const Twine &NameStr = "", |
943 | Instruction *InsertBefore = nullptr) { |
944 | GetElementPtrInst *GEP = |
945 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); |
946 | GEP->setIsInBounds(true); |
947 | return GEP; |
948 | } |
949 | |
950 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
951 | ArrayRef<Value *> IdxList, |
952 | const Twine &NameStr, |
953 | BasicBlock *InsertAtEnd) { |
954 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); |
955 | } |
956 | |
957 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, |
958 | ArrayRef<Value *> IdxList, |
959 | const Twine &NameStr, |
960 | BasicBlock *InsertAtEnd) { |
961 | GetElementPtrInst *GEP = |
962 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); |
963 | GEP->setIsInBounds(true); |
964 | return GEP; |
965 | } |
966 | |
967 | /// Transparently provide more efficient getOperand methods. |
968 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
969 | |
970 | Type *getSourceElementType() const { return SourceElementType; } |
971 | |
972 | void setSourceElementType(Type *Ty) { SourceElementType = Ty; } |
973 | void setResultElementType(Type *Ty) { ResultElementType = Ty; } |
974 | |
975 | Type *getResultElementType() const { |
976 | assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 977, __PRETTY_FUNCTION__)) |
977 | cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 977, __PRETTY_FUNCTION__)); |
978 | return ResultElementType; |
979 | } |
980 | |
981 | /// Returns the address space of this instruction's pointer type. |
982 | unsigned getAddressSpace() const { |
983 | // Note that this is always the same as the pointer operand's address space |
984 | // and that is cheaper to compute, so cheat here. |
985 | return getPointerAddressSpace(); |
986 | } |
987 | |
988 | /// Returns the type of the element that would be loaded with |
989 | /// a load instruction with the specified parameters. |
990 | /// |
991 | /// Null is returned if the indices are invalid for the specified |
992 | /// pointer type. |
993 | /// |
994 | static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); |
995 | static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); |
996 | static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); |
997 | |
998 | inline op_iterator idx_begin() { return op_begin()+1; } |
999 | inline const_op_iterator idx_begin() const { return op_begin()+1; } |
1000 | inline op_iterator idx_end() { return op_end(); } |
1001 | inline const_op_iterator idx_end() const { return op_end(); } |
1002 | |
1003 | inline iterator_range<op_iterator> indices() { |
1004 | return make_range(idx_begin(), idx_end()); |
1005 | } |
1006 | |
1007 | inline iterator_range<const_op_iterator> indices() const { |
1008 | return make_range(idx_begin(), idx_end()); |
1009 | } |
1010 | |
1011 | Value *getPointerOperand() { |
1012 | return getOperand(0); |
1013 | } |
1014 | const Value *getPointerOperand() const { |
1015 | return getOperand(0); |
1016 | } |
1017 | static unsigned getPointerOperandIndex() { |
1018 | return 0U; // get index for modifying correct operand. |
1019 | } |
1020 | |
1021 | /// Method to return the pointer operand as a |
1022 | /// PointerType. |
1023 | Type *getPointerOperandType() const { |
1024 | return getPointerOperand()->getType(); |
1025 | } |
1026 | |
1027 | /// Returns the address space of the pointer operand. |
1028 | unsigned getPointerAddressSpace() const { |
1029 | return getPointerOperandType()->getPointerAddressSpace(); |
1030 | } |
1031 | |
1032 | /// Returns the pointer type returned by the GEP |
1033 | /// instruction, which may be a vector of pointers. |
1034 | static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) { |
1035 | return getGEPReturnType( |
1036 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(), |
1037 | Ptr, IdxList); |
1038 | } |
1039 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, |
1040 | ArrayRef<Value *> IdxList) { |
1041 | Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), |
1042 | Ptr->getType()->getPointerAddressSpace()); |
1043 | // Vector GEP |
1044 | if (Ptr->getType()->isVectorTy()) { |
1045 | unsigned NumElem = Ptr->getType()->getVectorNumElements(); |
1046 | return VectorType::get(PtrTy, NumElem); |
1047 | } |
1048 | for (Value *Index : IdxList) |
1049 | if (Index->getType()->isVectorTy()) { |
1050 | unsigned NumElem = Index->getType()->getVectorNumElements(); |
1051 | return VectorType::get(PtrTy, NumElem); |
1052 | } |
1053 | // Scalar GEP |
1054 | return PtrTy; |
1055 | } |
1056 | |
1057 | unsigned getNumIndices() const { // Note: always non-negative |
1058 | return getNumOperands() - 1; |
1059 | } |
1060 | |
1061 | bool hasIndices() const { |
1062 | return getNumOperands() > 1; |
1063 | } |
1064 | |
1065 | /// Return true if all of the indices of this GEP are |
1066 | /// zeros. If so, the result pointer and the first operand have the same |
1067 | /// value, just potentially different types. |
1068 | bool hasAllZeroIndices() const; |
1069 | |
1070 | /// Return true if all of the indices of this GEP are |
1071 | /// constant integers. If so, the result pointer and the first operand have |
1072 | /// a constant offset between them. |
1073 | bool hasAllConstantIndices() const; |
1074 | |
1075 | /// Set or clear the inbounds flag on this GEP instruction. |
1076 | /// See LangRef.html for the meaning of inbounds on a getelementptr. |
1077 | void setIsInBounds(bool b = true); |
1078 | |
1079 | /// Determine whether the GEP has the inbounds flag. |
1080 | bool isInBounds() const; |
1081 | |
1082 | /// Accumulate the constant address offset of this GEP if possible. |
1083 | /// |
1084 | /// This routine accepts an APInt into which it will accumulate the constant |
1085 | /// offset of this GEP if the GEP is in fact constant. If the GEP is not |
1086 | /// all-constant, it returns false and the value of the offset APInt is |
1087 | /// undefined (it is *not* preserved!). The APInt passed into this routine |
1088 | /// must be at least as wide as the IntPtr type for the address space of |
1089 | /// the base GEP pointer. |
1090 | bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; |
1091 | |
1092 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1093 | static bool classof(const Instruction *I) { |
1094 | return (I->getOpcode() == Instruction::GetElementPtr); |
1095 | } |
1096 | static bool classof(const Value *V) { |
1097 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1098 | } |
1099 | }; |
1100 | |
// A GEP has one pointer operand plus a variable number of indices, so it
// uses the variadic traits with a minimum of 1 operand.
template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};
1105 | |
1106 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1107 | ArrayRef<Value *> IdxList, unsigned Values, |
1108 | const Twine &NameStr, |
1109 | Instruction *InsertBefore) |
1110 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1111 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1112 | Values, InsertBefore), |
1113 | SourceElementType(PointeeType), |
1114 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1115 | assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1116, __PRETTY_FUNCTION__)) |
1116 | cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1116, __PRETTY_FUNCTION__)); |
1117 | init(Ptr, IdxList, NameStr); |
1118 | } |
1119 | |
1120 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1121 | ArrayRef<Value *> IdxList, unsigned Values, |
1122 | const Twine &NameStr, |
1123 | BasicBlock *InsertAtEnd) |
1124 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1125 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1126 | Values, InsertAtEnd), |
1127 | SourceElementType(PointeeType), |
1128 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1129 | assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1130, __PRETTY_FUNCTION__)) |
1130 | cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()-> getScalarType())->getElementType()) ? static_cast<void> (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1130, __PRETTY_FUNCTION__)); |
1131 | init(Ptr, IdxList, NameStr); |
1132 | } |
1133 | |
1134 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() { return OperandTraits<GetElementPtrInst>::op_begin(this ); } GetElementPtrInst::const_op_iterator GetElementPtrInst:: op_begin() const { return OperandTraits<GetElementPtrInst> ::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst ::op_iterator GetElementPtrInst::op_end() { return OperandTraits <GetElementPtrInst>::op_end(this); } GetElementPtrInst:: const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits <GetElementPtrInst>::op_end(const_cast<GetElementPtrInst *>(this)); } Value *GetElementPtrInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<GetElementPtrInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1134, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<GetElementPtrInst>::op_begin(const_cast <GetElementPtrInst*>(this))[i_nocapture].get()); } void GetElementPtrInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<GetElementPtrInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1134, __PRETTY_FUNCTION__)); OperandTraits<GetElementPtrInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned GetElementPtrInst::getNumOperands() const { return OperandTraits <GetElementPtrInst>::operands(this); } template <int Idx_nocapture> Use &GetElementPtrInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &GetElementPtrInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
1135 | |
1136 | //===----------------------------------------------------------------------===// |
1137 | // ICmpInst Class |
1138 | //===----------------------------------------------------------------------===// |
1139 | |
1140 | /// This instruction compares its operands according to the predicate given |
1141 | /// to the constructor. It only operates on integers or pointers. The operands |
1142 | /// must be identical types. |
1143 | /// Represent an integer comparison operator. |
1144 | class ICmpInst: public CmpInst { |
1145 | void AssertOK() { |
1146 | assert(isIntPredicate() &&((isIntPredicate() && "Invalid ICmp predicate value") ? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1147, __PRETTY_FUNCTION__)) |
1147 | "Invalid ICmp predicate value")((isIntPredicate() && "Invalid ICmp predicate value") ? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1147, __PRETTY_FUNCTION__)); |
1148 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to ICmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1149, __PRETTY_FUNCTION__)) |
1149 | "Both operands to ICmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to ICmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1149, __PRETTY_FUNCTION__)); |
1150 | // Check that the operands are the right type |
1151 | assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand (0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction" ) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1153, __PRETTY_FUNCTION__)) |
1152 | getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand (0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction" ) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1153, __PRETTY_FUNCTION__)) |
1153 | "Invalid operand types for ICmp instruction")(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand (0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction" ) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1153, __PRETTY_FUNCTION__)); |
1154 | } |
1155 | |
1156 | protected: |
1157 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1158 | friend class Instruction; |
1159 | |
1160 | /// Clone an identical ICmpInst |
1161 | ICmpInst *cloneImpl() const; |
1162 | |
1163 | public: |
1164 | /// Constructor with insert-before-instruction semantics. |
1165 | ICmpInst( |
1166 | Instruction *InsertBefore, ///< Where to insert |
1167 | Predicate pred, ///< The predicate to use for the comparison |
1168 | Value *LHS, ///< The left-hand-side of the expression |
1169 | Value *RHS, ///< The right-hand-side of the expression |
1170 | const Twine &NameStr = "" ///< Name of the instruction |
1171 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1172 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1173 | InsertBefore) { |
1174 | #ifndef NDEBUG |
1175 | AssertOK(); |
1176 | #endif |
1177 | } |
1178 | |
1179 | /// Constructor with insert-at-end semantics. |
1180 | ICmpInst( |
1181 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1182 | Predicate pred, ///< The predicate to use for the comparison |
1183 | Value *LHS, ///< The left-hand-side of the expression |
1184 | Value *RHS, ///< The right-hand-side of the expression |
1185 | const Twine &NameStr = "" ///< Name of the instruction |
1186 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1187 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1188 | &InsertAtEnd) { |
1189 | #ifndef NDEBUG |
1190 | AssertOK(); |
1191 | #endif |
1192 | } |
1193 | |
1194 | /// Constructor with no-insertion semantics |
1195 | ICmpInst( |
1196 | Predicate pred, ///< The predicate to use for the comparison |
1197 | Value *LHS, ///< The left-hand-side of the expression |
1198 | Value *RHS, ///< The right-hand-side of the expression |
1199 | const Twine &NameStr = "" ///< Name of the instruction |
1200 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1201 | Instruction::ICmp, pred, LHS, RHS, NameStr) { |
1202 | #ifndef NDEBUG |
1203 | AssertOK(); |
1204 | #endif |
1205 | } |
1206 | |
1207 | /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. |
1208 | /// @returns the predicate that would be the result if the operand were |
1209 | /// regarded as signed. |
1210 | /// Return the signed version of the predicate |
1211 | Predicate getSignedPredicate() const { |
1212 | return getSignedPredicate(getPredicate()); |
1213 | } |
1214 | |
1215 | /// This is a static version that you can use without an instruction. |
1216 | /// Return the signed version of the predicate. |
1217 | static Predicate getSignedPredicate(Predicate pred); |
1218 | |
1219 | /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. |
1220 | /// @returns the predicate that would be the result if the operand were |
1221 | /// regarded as unsigned. |
1222 | /// Return the unsigned version of the predicate |
1223 | Predicate getUnsignedPredicate() const { |
1224 | return getUnsignedPredicate(getPredicate()); |
1225 | } |
1226 | |
1227 | /// This is a static version that you can use without an instruction. |
1228 | /// Return the unsigned version of the predicate. |
1229 | static Predicate getUnsignedPredicate(Predicate pred); |
1230 | |
1231 | /// Return true if this predicate is either EQ or NE. This also |
1232 | /// tests for commutativity. |
1233 | static bool isEquality(Predicate P) { |
1234 | return P == ICMP_EQ || P == ICMP_NE; |
1235 | } |
1236 | |
1237 | /// Return true if this predicate is either EQ or NE. This also |
1238 | /// tests for commutativity. |
1239 | bool isEquality() const { |
1240 | return isEquality(getPredicate()); |
1241 | } |
1242 | |
1243 | /// @returns true if the predicate of this ICmpInst is commutative |
1244 | /// Determine if this relation is commutative. |
1245 | bool isCommutative() const { return isEquality(); } |
1246 | |
1247 | /// Return true if the predicate is relational (not EQ or NE). |
1248 | /// |
1249 | bool isRelational() const { |
1250 | return !isEquality(); |
1251 | } |
1252 | |
1253 | /// Return true if the predicate is relational (not EQ or NE). |
1254 | /// |
1255 | static bool isRelational(Predicate P) { |
1256 | return !isEquality(P); |
1257 | } |
1258 | |
  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    // Mirror the predicate first (computed from the current predicate),
    // then exchange the two operand Uses in place.
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }
1268 | |
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    // An Instruction is an ICmpInst iff its opcode is ICmp.
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    // A Value matches only if it is an Instruction with the ICmp opcode.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
1276 | }; |
1277 | |
1278 | //===----------------------------------------------------------------------===// |
1279 | // FCmpInst Class |
1280 | //===----------------------------------------------------------------------===// |
1281 | |
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  /// Shared constructor sanity checks: the predicate must be a
  /// floating-point (FCMP_*) predicate and both operands must be the same
  /// FP or FP-vector type. Compiled away in NDEBUG builds.
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value")((isFPPredicate() && "Invalid FCmp predicate value") ? static_cast<void> (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1288, __PRETTY_FUNCTION__));
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to FCmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1290, __PRETTY_FUNCTION__))
           "Both operands to FCmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to FCmp instruction are not of the same type!" ) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1290, __PRETTY_FUNCTION__));
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&((getOperand(0)->getType()->isFPOrFPVectorTy() && "Invalid operand types for FCmp instruction") ? static_cast< void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1293, __PRETTY_FUNCTION__))
           "Invalid operand types for FCmp instruction")((getOperand(0)->getType()->isFPOrFPVectorTy() && "Invalid operand types for FCmp instruction") ? static_cast< void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1293, __PRETTY_FUNCTION__));
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics. \p FlagsSource, if non-null,
  /// is the instruction to copy fast-math flags from.
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate \p Pred is an equality or inequality
  /// test, ordered or unordered (OEQ, ONE, UEQ, UNE).
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is an equality or
  /// inequality test, ordered or unordered (OEQ, ONE, UEQ, UNE).
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  // Besides the equality predicates, FALSE, TRUE, ORD and UNO do not
  // depend on operand order and are therefore also commutative.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not an equality or
  /// inequality predicate).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    // Mirror the predicate first, then exchange the two operand Uses.
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1385 | |
1386 | //===----------------------------------------------------------------------===// |
/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses the two low bits of the
/// SubClassData field to encode the TailCallKind of the call (none, tail,
/// musttail, notail); the rest of the bits hold the calling convention of
/// the call.
///
class CallInst : public CallBase {
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments, inserted before
  /// \p InsertBefore.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  // Bundle-less convenience form of the above.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments, appended to
  /// \p InsertAtEnd.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  // --- Create() overloads taking an explicit FunctionType ---

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra bytes co-allocated after the operands to describe the bundles.
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra bytes co-allocated after the operands to describe the bundles.
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  // --- Create() overloads taking a FunctionCallee (type + callee pair) ---

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  // --- Deprecated Create() overloads that recover the function type from
  // --- the callee's pointer type; they break under opaque pointers.

  // Deprecated [opaque pointer types]
  static CallInst *Create(Value *Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, NameStr, InsertBefore);
  }

  // Deprecated [opaque pointer types]
  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, Args, NameStr, InsertBefore);
  }

  // Deprecated [opaque pointer types]
  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, Args, Bundles, NameStr, InsertBefore);
  }

  // Deprecated [opaque pointer types]
  static CallInst *Create(Value *Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, NameStr, InsertAtEnd);
  }

  // Deprecated [opaque pointer types]
  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, Args, NameStr, InsertAtEnd);
  }

  // Deprecated [opaque pointer types]
  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3
  };
  // The tail-call kind occupies the two low bits of the subclass data.
  TailCallKind getTailCallKind() const {
    return TailCallKind(getSubclassDataFromInstruction() & 3);
  }

  bool isTailCall() const {
    unsigned Kind = getSubclassDataFromInstruction() & 3;
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const {
    return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
  }

  bool isNoTailCall() const {
    return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
  }

  // Note: setTailCall(false) resets the kind to TCK_None, discarding any
  // previous musttail/notail marker.
  void setTailCall(bool isTC = true) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
                               unsigned(isTC ? TCK_Tail : TCK_None));
  }

  void setTailCallKind(TailCallKind TCK) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
                               unsigned(TCK));
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() {
    addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};
1678 | |
// Out-of-line constructor: append the call to the end of \p InsertAtEnd.
// The operands (arguments + bundle inputs + the callee, hence the +1) are
// co-allocated immediately before this instruction; the first CallBase
// argument computes the start of that operand array by backing up from
// op_end(this).
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1689 | |
// Out-of-line constructor: insert the call before \p InsertBefore.
// Same operand-array layout as the insert-at-end constructor above: the
// arguments, bundle inputs and the callee (+1) are co-allocated directly
// before this instruction.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1700 | |
1701 | //===----------------------------------------------------------------------===// |
1702 | // SelectInst Class |
1703 | //===----------------------------------------------------------------------===// |
1704 | |
/// This class represents the LLVM 'select' instruction: it yields its second
/// operand when the condition is true and its third operand otherwise. The
/// result type matches the type of the true/false values.
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Shared constructor body: validate the operand combination (see
  // areInvalidOperands) and wire up the three operands.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")((!areInvalidOperands(C, S1, S2) && "Invalid operands for select" ) ? static_cast<void> (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1725, __PRETTY_FUNCTION__));
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    // Optionally copy metadata (e.g. profile data) from an existing
    // instruction onto the new select.
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  // Operand 0 is the condition, operand 1 the value selected when the
  // condition is true, operand 2 the value selected when it is false.
  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1784 | |
// A SelectInst always has exactly three operands: condition, true value,
// and false value.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1788 | |
1789 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits <SelectInst>::op_begin(this); } SelectInst::const_op_iterator SelectInst::op_begin() const { return OperandTraits<SelectInst >::op_begin(const_cast<SelectInst*>(this)); } SelectInst ::op_iterator SelectInst::op_end() { return OperandTraits< SelectInst>::op_end(this); } SelectInst::const_op_iterator SelectInst::op_end() const { return OperandTraits<SelectInst >::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<SelectInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1789, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<SelectInst>::op_begin(const_cast<SelectInst *>(this))[i_nocapture].get()); } void SelectInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<SelectInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1789, __PRETTY_FUNCTION__)); OperandTraits<SelectInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SelectInst ::getNumOperands() const { return OperandTraits<SelectInst >::operands(this); } template <int Idx_nocapture> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & SelectInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
1790 | |
1791 | //===----------------------------------------------------------------------===// |
1792 | // VAArgInst Class |
1793 | //===----------------------------------------------------------------------===// |
1794 | |
/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  /// Construct a va_arg of type \p Ty reading from va_list \p List,
  /// inserted before \p InsertBefore.
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  /// Construct a va_arg of type \p Ty reading from va_list \p List,
  /// appended to \p InsertAtEnd.
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The single operand is the va_list pointer.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1830 | |
1831 | //===----------------------------------------------------------------------===// |
1832 | // ExtractElementInst Class |
1833 | //===----------------------------------------------------------------------===// |
1834 | |
/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  /// Create an extractelement of element \p Idx from vector \p Vec,
  /// optionally inserted before \p InsertBefore.
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  /// Create an extractelement of element \p Idx from vector \p Vec,
  /// appended to \p InsertAtEnd.
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the source vector, operand 1 the element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1887 | |
// An ExtractElementInst always has exactly two operands: the source vector
// and the element index.
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};
1892 | |
1893 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin( ) { return OperandTraits<ExtractElementInst>::op_begin( this); } ExtractElementInst::const_op_iterator ExtractElementInst ::op_begin() const { return OperandTraits<ExtractElementInst >::op_begin(const_cast<ExtractElementInst*>(this)); } ExtractElementInst::op_iterator ExtractElementInst::op_end() { return OperandTraits<ExtractElementInst>::op_end(this ); } ExtractElementInst::const_op_iterator ExtractElementInst ::op_end() const { return OperandTraits<ExtractElementInst >::op_end(const_cast<ExtractElementInst*>(this)); } Value *ExtractElementInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ExtractElementInst>:: operands(this) && "getOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1893, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ExtractElementInst>::op_begin(const_cast <ExtractElementInst*>(this))[i_nocapture].get()); } void ExtractElementInst::setOperand(unsigned i_nocapture, Value * Val_nocapture) { ((i_nocapture < OperandTraits<ExtractElementInst >::operands(this) && "setOperand() out of range!") ? 
static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1893, __PRETTY_FUNCTION__)); OperandTraits<ExtractElementInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ExtractElementInst::getNumOperands() const { return OperandTraits <ExtractElementInst>::operands(this); } template <int Idx_nocapture> Use &ExtractElementInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &ExtractElementInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } |
1894 | |
1895 | //===----------------------------------------------------------------------===// |
1896 | // InsertElementInst Class |
1897 | //===----------------------------------------------------------------------===// |
1898 | |
1899 | /// This instruction inserts a single (scalar) |
1900 | /// element into a VectorType value |
1901 | /// |
1902 | class InsertElementInst : public Instruction { |
1903 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, |
1904 | const Twine &NameStr = "", |
1905 | Instruction *InsertBefore = nullptr); |
1906 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, |
1907 | BasicBlock *InsertAtEnd); |
1908 | |
1909 | protected: |
1910 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1911 | friend class Instruction; |
1912 | |
1913 | InsertElementInst *cloneImpl() const; |
1914 | |
1915 | public: |
1916 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1917 | const Twine &NameStr = "", |
1918 | Instruction *InsertBefore = nullptr) { |
1919 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); |
1920 | } |
1921 | |
1922 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1923 | const Twine &NameStr, |
1924 | BasicBlock *InsertAtEnd) { |
1925 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); |
1926 | } |
1927 | |
1928 | /// Return true if an insertelement instruction can be |
1929 | /// formed with the specified operands. |
1930 | static bool isValidOperands(const Value *Vec, const Value *NewElt, |
1931 | const Value *Idx); |
1932 | |
1933 | /// Overload to return most specific vector type. |
1934 | /// |
1935 | VectorType *getType() const { |
1936 | return cast<VectorType>(Instruction::getType()); |
1937 | } |
1938 | |
1939 | /// Transparently provide more efficient getOperand methods. |
1940 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
1941 | |
1942 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1943 | static bool classof(const Instruction *I) { |
1944 | return I->getOpcode() == Instruction::InsertElement; |
1945 | } |
1946 | static bool classof(const Value *V) { |
1947 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1948 | } |
1949 | }; |
1950 | |
1951 | template <> |
1952 | struct OperandTraits<InsertElementInst> : |
1953 | public FixedNumOperandTraits<InsertElementInst, 3> { |
1954 | }; |
1955 | |
1956 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() { return OperandTraits<InsertElementInst>::op_begin(this ); } InsertElementInst::const_op_iterator InsertElementInst:: op_begin() const { return OperandTraits<InsertElementInst> ::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst ::op_iterator InsertElementInst::op_end() { return OperandTraits <InsertElementInst>::op_end(this); } InsertElementInst:: const_op_iterator InsertElementInst::op_end() const { return OperandTraits <InsertElementInst>::op_end(const_cast<InsertElementInst *>(this)); } Value *InsertElementInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<InsertElementInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1956, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<InsertElementInst>::op_begin(const_cast <InsertElementInst*>(this))[i_nocapture].get()); } void InsertElementInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<InsertElementInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 1956, __PRETTY_FUNCTION__)); OperandTraits<InsertElementInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned InsertElementInst::getNumOperands() const { return OperandTraits <InsertElementInst>::operands(this); } template <int Idx_nocapture> Use &InsertElementInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &InsertElementInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
1957 | |
1958 | //===----------------------------------------------------------------------===// |
1959 | // ShuffleVectorInst Class |
1960 | //===----------------------------------------------------------------------===// |
1961 | |
1962 | /// This instruction constructs a fixed permutation of two |
1963 | /// input vectors. |
1964 | /// |
1965 | class ShuffleVectorInst : public Instruction { |
1966 | protected: |
1967 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1968 | friend class Instruction; |
1969 | |
1970 | ShuffleVectorInst *cloneImpl() const; |
1971 | |
1972 | public: |
1973 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
1974 | const Twine &NameStr = "", |
1975 | Instruction *InsertBefor = nullptr); |
1976 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
1977 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
1978 | |
1979 | // allocate space for exactly three operands |
1980 | void *operator new(size_t s) { |
1981 | return User::operator new(s, 3); |
1982 | } |
1983 | |
1984 | /// Swap the first 2 operands and adjust the mask to preserve the semantics |
1985 | /// of the instruction. |
1986 | void commute(); |
1987 | |
1988 | /// Return true if a shufflevector instruction can be |
1989 | /// formed with the specified operands. |
1990 | static bool isValidOperands(const Value *V1, const Value *V2, |
1991 | const Value *Mask); |
1992 | |
1993 | /// Overload to return most specific vector type. |
1994 | /// |
1995 | VectorType *getType() const { |
1996 | return cast<VectorType>(Instruction::getType()); |
1997 | } |
1998 | |
1999 | /// Transparently provide more efficient getOperand methods. |
2000 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
2001 | |
2002 | Constant *getMask() const { |
2003 | return cast<Constant>(getOperand(2)); |
2004 | } |
2005 | |
2006 | /// Return the shuffle mask value for the specified element of the mask. |
2007 | /// Return -1 if the element is undef. |
2008 | static int getMaskValue(const Constant *Mask, unsigned Elt); |
2009 | |
2010 | /// Return the shuffle mask value of this instruction for the given element |
2011 | /// index. Return -1 if the element is undef. |
2012 | int getMaskValue(unsigned Elt) const { |
2013 | return getMaskValue(getMask(), Elt); |
2014 | } |
2015 | |
2016 | /// Convert the input shuffle mask operand to a vector of integers. Undefined |
2017 | /// elements of the mask are returned as -1. |
2018 | static void getShuffleMask(const Constant *Mask, |
2019 | SmallVectorImpl<int> &Result); |
2020 | |
2021 | /// Return the mask for this instruction as a vector of integers. Undefined |
2022 | /// elements of the mask are returned as -1. |
2023 | void getShuffleMask(SmallVectorImpl<int> &Result) const { |
2024 | return getShuffleMask(getMask(), Result); |
2025 | } |
2026 | |
2027 | SmallVector<int, 16> getShuffleMask() const { |
2028 | SmallVector<int, 16> Mask; |
2029 | getShuffleMask(Mask); |
2030 | return Mask; |
2031 | } |
2032 | |
2033 | /// Return true if this shuffle returns a vector with a different number of |
2034 | /// elements than its source vectors. |
2035 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> |
2036 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> |
2037 | bool changesLength() const { |
2038 | unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements(); |
2039 | unsigned NumMaskElts = getMask()->getType()->getVectorNumElements(); |
2040 | return NumSourceElts != NumMaskElts; |
2041 | } |
2042 | |
2043 | /// Return true if this shuffle returns a vector with a greater number of |
2044 | /// elements than its source vectors. |
2045 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> |
2046 | bool increasesLength() const { |
2047 | unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements(); |
2048 | unsigned NumMaskElts = getMask()->getType()->getVectorNumElements(); |
2049 | return NumSourceElts < NumMaskElts; |
2050 | } |
2051 | |
2052 | /// Return true if this shuffle mask chooses elements from exactly one source |
2053 | /// vector. |
2054 | /// Example: <7,5,undef,7> |
2055 | /// This assumes that vector operands are the same length as the mask. |
2056 | static bool isSingleSourceMask(ArrayRef<int> Mask); |
2057 | static bool isSingleSourceMask(const Constant *Mask) { |
2058 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2058, __PRETTY_FUNCTION__)); |
2059 | SmallVector<int, 16> MaskAsInts; |
2060 | getShuffleMask(Mask, MaskAsInts); |
2061 | return isSingleSourceMask(MaskAsInts); |
2062 | } |
2063 | |
2064 | /// Return true if this shuffle chooses elements from exactly one source |
2065 | /// vector without changing the length of that vector. |
2066 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> |
2067 | /// TODO: Optionally allow length-changing shuffles. |
2068 | bool isSingleSource() const { |
2069 | return !changesLength() && isSingleSourceMask(getMask()); |
2070 | } |
2071 | |
2072 | /// Return true if this shuffle mask chooses elements from exactly one source |
2073 | /// vector without lane crossings. A shuffle using this mask is not |
2074 | /// necessarily a no-op because it may change the number of elements from its |
2075 | /// input vectors or it may provide demanded bits knowledge via undef lanes. |
2076 | /// Example: <undef,undef,2,3> |
2077 | static bool isIdentityMask(ArrayRef<int> Mask); |
2078 | static bool isIdentityMask(const Constant *Mask) { |
2079 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2079, __PRETTY_FUNCTION__)); |
2080 | SmallVector<int, 16> MaskAsInts; |
2081 | getShuffleMask(Mask, MaskAsInts); |
2082 | return isIdentityMask(MaskAsInts); |
2083 | } |
2084 | |
2085 | /// Return true if this shuffle chooses elements from exactly one source |
2086 | /// vector without lane crossings and does not change the number of elements |
2087 | /// from its input vectors. |
2088 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> |
2089 | bool isIdentity() const { |
2090 | return !changesLength() && isIdentityMask(getShuffleMask()); |
2091 | } |
2092 | |
2093 | /// Return true if this shuffle lengthens exactly one source vector with |
2094 | /// undefs in the high elements. |
2095 | bool isIdentityWithPadding() const; |
2096 | |
2097 | /// Return true if this shuffle extracts the first N elements of exactly one |
2098 | /// source vector. |
2099 | bool isIdentityWithExtract() const; |
2100 | |
2101 | /// Return true if this shuffle concatenates its 2 source vectors. This |
2102 | /// returns false if either input is undefined. In that case, the shuffle is |
2103 | /// is better classified as an identity with padding operation. |
2104 | bool isConcat() const; |
2105 | |
2106 | /// Return true if this shuffle mask chooses elements from its source vectors |
2107 | /// without lane crossings. A shuffle using this mask would be |
2108 | /// equivalent to a vector select with a constant condition operand. |
2109 | /// Example: <4,1,6,undef> |
2110 | /// This returns false if the mask does not choose from both input vectors. |
2111 | /// In that case, the shuffle is better classified as an identity shuffle. |
2112 | /// This assumes that vector operands are the same length as the mask |
2113 | /// (a length-changing shuffle can never be equivalent to a vector select). |
2114 | static bool isSelectMask(ArrayRef<int> Mask); |
2115 | static bool isSelectMask(const Constant *Mask) { |
2116 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2116, __PRETTY_FUNCTION__)); |
2117 | SmallVector<int, 16> MaskAsInts; |
2118 | getShuffleMask(Mask, MaskAsInts); |
2119 | return isSelectMask(MaskAsInts); |
2120 | } |
2121 | |
2122 | /// Return true if this shuffle chooses elements from its source vectors |
2123 | /// without lane crossings and all operands have the same number of elements. |
2124 | /// In other words, this shuffle is equivalent to a vector select with a |
2125 | /// constant condition operand. |
2126 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> |
2127 | /// This returns false if the mask does not choose from both input vectors. |
2128 | /// In that case, the shuffle is better classified as an identity shuffle. |
2129 | /// TODO: Optionally allow length-changing shuffles. |
2130 | bool isSelect() const { |
2131 | return !changesLength() && isSelectMask(getMask()); |
2132 | } |
2133 | |
2134 | /// Return true if this shuffle mask swaps the order of elements from exactly |
2135 | /// one source vector. |
2136 | /// Example: <7,6,undef,4> |
2137 | /// This assumes that vector operands are the same length as the mask. |
2138 | static bool isReverseMask(ArrayRef<int> Mask); |
2139 | static bool isReverseMask(const Constant *Mask) { |
2140 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2140, __PRETTY_FUNCTION__)); |
2141 | SmallVector<int, 16> MaskAsInts; |
2142 | getShuffleMask(Mask, MaskAsInts); |
2143 | return isReverseMask(MaskAsInts); |
2144 | } |
2145 | |
2146 | /// Return true if this shuffle swaps the order of elements from exactly |
2147 | /// one source vector. |
2148 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> |
2149 | /// TODO: Optionally allow length-changing shuffles. |
2150 | bool isReverse() const { |
2151 | return !changesLength() && isReverseMask(getMask()); |
2152 | } |
2153 | |
2154 | /// Return true if this shuffle mask chooses all elements with the same value |
2155 | /// as the first element of exactly one source vector. |
2156 | /// Example: <4,undef,undef,4> |
2157 | /// This assumes that vector operands are the same length as the mask. |
2158 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); |
2159 | static bool isZeroEltSplatMask(const Constant *Mask) { |
2160 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2160, __PRETTY_FUNCTION__)); |
2161 | SmallVector<int, 16> MaskAsInts; |
2162 | getShuffleMask(Mask, MaskAsInts); |
2163 | return isZeroEltSplatMask(MaskAsInts); |
2164 | } |
2165 | |
2166 | /// Return true if all elements of this shuffle are the same value as the |
2167 | /// first element of exactly one source vector without changing the length |
2168 | /// of that vector. |
2169 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> |
2170 | /// TODO: Optionally allow length-changing shuffles. |
2171 | /// TODO: Optionally allow splats from other elements. |
2172 | bool isZeroEltSplat() const { |
2173 | return !changesLength() && isZeroEltSplatMask(getMask()); |
2174 | } |
2175 | |
2176 | /// Return true if this shuffle mask is a transpose mask. |
2177 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding |
2178 | /// even- or odd-numbered vector elements from two n-dimensional source |
2179 | /// vectors and write each result into consecutive elements of an |
2180 | /// n-dimensional destination vector. Two shuffles are necessary to complete |
2181 | /// the transpose, one for the even elements and another for the odd elements. |
2182 | /// This description closely follows how the TRN1 and TRN2 AArch64 |
2183 | /// instructions operate. |
2184 | /// |
2185 | /// For example, a simple 2x2 matrix can be transposed with: |
2186 | /// |
2187 | /// ; Original matrix |
2188 | /// m0 = < a, b > |
2189 | /// m1 = < c, d > |
2190 | /// |
2191 | /// ; Transposed matrix |
2192 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > |
2193 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > |
2194 | /// |
2195 | /// For matrices having greater than n columns, the resulting nx2 transposed |
2196 | /// matrix is stored in two result vectors such that one vector contains |
2197 | /// interleaved elements from all the even-numbered rows and the other vector |
2198 | /// contains interleaved elements from all the odd-numbered rows. For example, |
2199 | /// a 2x4 matrix can be transposed with: |
2200 | /// |
2201 | /// ; Original matrix |
2202 | /// m0 = < a, b, c, d > |
2203 | /// m1 = < e, f, g, h > |
2204 | /// |
2205 | /// ; Transposed matrix |
2206 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > |
2207 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > |
2208 | static bool isTransposeMask(ArrayRef<int> Mask); |
2209 | static bool isTransposeMask(const Constant *Mask) { |
2210 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2210, __PRETTY_FUNCTION__)); |
2211 | SmallVector<int, 16> MaskAsInts; |
2212 | getShuffleMask(Mask, MaskAsInts); |
2213 | return isTransposeMask(MaskAsInts); |
2214 | } |
2215 | |
2216 | /// Return true if this shuffle transposes the elements of its inputs without |
2217 | /// changing the length of the vectors. This operation may also be known as a |
2218 | /// merge or interleave. See the description for isTransposeMask() for the |
2219 | /// exact specification. |
2220 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> |
2221 | bool isTranspose() const { |
2222 | return !changesLength() && isTransposeMask(getMask()); |
2223 | } |
2224 | |
2225 | /// Return true if this shuffle mask is an extract subvector mask. |
2226 | /// A valid extract subvector mask returns a smaller vector from a single |
2227 | /// source operand. The base extraction index is returned as well. |
2228 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
2229 | int &Index); |
2230 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, |
2231 | int &Index) { |
2232 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant." ) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2232, __PRETTY_FUNCTION__)); |
2233 | SmallVector<int, 16> MaskAsInts; |
2234 | getShuffleMask(Mask, MaskAsInts); |
2235 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); |
2236 | } |
2237 | |
2238 | /// Return true if this shuffle mask is an extract subvector mask. |
2239 | bool isExtractSubvectorMask(int &Index) const { |
2240 | int NumSrcElts = Op<0>()->getType()->getVectorNumElements(); |
2241 | return isExtractSubvectorMask(getMask(), NumSrcElts, Index); |
2242 | } |
2243 | |
2244 | /// Change values in a shuffle permute mask assuming the two vector operands |
2245 | /// of length InVecNumElts have swapped position. |
2246 | static void commuteShuffleMask(MutableArrayRef<int> Mask, |
2247 | unsigned InVecNumElts) { |
2248 | for (int &Idx : Mask) { |
2249 | if (Idx == -1) |
2250 | continue; |
2251 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; |
2252 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&((Idx >= 0 && Idx < (int)InVecNumElts * 2 && "shufflevector mask index out of range") ? static_cast<void > (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2253, __PRETTY_FUNCTION__)) |
2253 | "shufflevector mask index out of range")((Idx >= 0 && Idx < (int)InVecNumElts * 2 && "shufflevector mask index out of range") ? static_cast<void > (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2253, __PRETTY_FUNCTION__)); |
2254 | } |
2255 | } |
2256 | |
2257 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2258 | static bool classof(const Instruction *I) { |
2259 | return I->getOpcode() == Instruction::ShuffleVector; |
2260 | } |
2261 | static bool classof(const Value *V) { |
2262 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2263 | } |
2264 | }; |
2265 | |
2266 | template <> |
2267 | struct OperandTraits<ShuffleVectorInst> : |
2268 | public FixedNumOperandTraits<ShuffleVectorInst, 3> { |
2269 | }; |
2270 | |
2271 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() { return OperandTraits<ShuffleVectorInst>::op_begin(this ); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst:: op_begin() const { return OperandTraits<ShuffleVectorInst> ::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst ::op_iterator ShuffleVectorInst::op_end() { return OperandTraits <ShuffleVectorInst>::op_end(this); } ShuffleVectorInst:: const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits <ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst *>(this)); } Value *ShuffleVectorInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ShuffleVectorInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2271, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ShuffleVectorInst>::op_begin(const_cast <ShuffleVectorInst*>(this))[i_nocapture].get()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<ShuffleVectorInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2271, __PRETTY_FUNCTION__)); OperandTraits<ShuffleVectorInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ShuffleVectorInst::getNumOperands() const { return OperandTraits <ShuffleVectorInst>::operands(this); } template <int Idx_nocapture> Use &ShuffleVectorInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &ShuffleVectorInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
2272 | |
2273 | //===----------------------------------------------------------------------===// |
2274 | // ExtractValueInst Class |
2275 | //===----------------------------------------------------------------------===// |
2276 | |
2277 | /// This instruction extracts a struct member or array |
2278 | /// element value from an aggregate value. |
2279 | /// |
2280 | class ExtractValueInst : public UnaryInstruction { |
2281 | SmallVector<unsigned, 4> Indices; |
2282 | |
2283 | ExtractValueInst(const ExtractValueInst &EVI); |
2284 | |
2285 | /// Constructors - Create a extractvalue instruction with a base aggregate |
2286 | /// value and a list of indices. The first ctor can optionally insert before |
2287 | /// an existing instruction, the second appends the new instruction to the |
2288 | /// specified BasicBlock. |
2289 | inline ExtractValueInst(Value *Agg, |
2290 | ArrayRef<unsigned> Idxs, |
2291 | const Twine &NameStr, |
2292 | Instruction *InsertBefore); |
2293 | inline ExtractValueInst(Value *Agg, |
2294 | ArrayRef<unsigned> Idxs, |
2295 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2296 | |
2297 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); |
2298 | |
2299 | protected: |
2300 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2301 | friend class Instruction; |
2302 | |
2303 | ExtractValueInst *cloneImpl() const; |
2304 | |
2305 | public: |
2306 | static ExtractValueInst *Create(Value *Agg, |
2307 | ArrayRef<unsigned> Idxs, |
2308 | const Twine &NameStr = "", |
2309 | Instruction *InsertBefore = nullptr) { |
2310 | return new |
2311 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); |
2312 | } |
2313 | |
2314 | static ExtractValueInst *Create(Value *Agg, |
2315 | ArrayRef<unsigned> Idxs, |
2316 | const Twine &NameStr, |
2317 | BasicBlock *InsertAtEnd) { |
2318 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); |
2319 | } |
2320 | |
2321 | /// Returns the type of the element that would be extracted |
2322 | /// with an extractvalue instruction with the specified parameters. |
2323 | /// |
2324 | /// Null is returned if the indices are invalid for the specified type. |
2325 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); |
2326 | |
2327 | using idx_iterator = const unsigned*; |
2328 | |
2329 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2330 | inline idx_iterator idx_end() const { return Indices.end(); } |
2331 | inline iterator_range<idx_iterator> indices() const { |
2332 | return make_range(idx_begin(), idx_end()); |
2333 | } |
2334 | |
2335 | Value *getAggregateOperand() { |
2336 | return getOperand(0); |
2337 | } |
2338 | const Value *getAggregateOperand() const { |
2339 | return getOperand(0); |
2340 | } |
2341 | static unsigned getAggregateOperandIndex() { |
2342 | return 0U; // get index for modifying correct operand |
2343 | } |
2344 | |
2345 | ArrayRef<unsigned> getIndices() const { |
2346 | return Indices; |
2347 | } |
2348 | |
2349 | unsigned getNumIndices() const { |
2350 | return (unsigned)Indices.size(); |
2351 | } |
2352 | |
2353 | bool hasIndices() const { |
2354 | return true; |
2355 | } |
2356 | |
2357 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2358 | static bool classof(const Instruction *I) { |
2359 | return I->getOpcode() == Instruction::ExtractValue; |
2360 | } |
2361 | static bool classof(const Value *V) { |
2362 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2363 | } |
2364 | }; |
2365 | |
2366 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2367 | ArrayRef<unsigned> Idxs, |
2368 | const Twine &NameStr, |
2369 | Instruction *InsertBefore) |
2370 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2371 | ExtractValue, Agg, InsertBefore) { |
2372 | init(Idxs, NameStr); |
2373 | } |
2374 | |
2375 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2376 | ArrayRef<unsigned> Idxs, |
2377 | const Twine &NameStr, |
2378 | BasicBlock *InsertAtEnd) |
2379 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2380 | ExtractValue, Agg, InsertAtEnd) { |
2381 | init(Idxs, NameStr); |
2382 | } |
2383 | |
2384 | //===----------------------------------------------------------------------===// |
2385 | // InsertValueInst Class |
2386 | //===----------------------------------------------------------------------===// |
2387 | |
2388 | /// This instruction inserts a struct field or array element
2389 | /// value into an aggregate value.
2390 | /// |
// NOTE(review): this is a static-analyzer report listing of llvm/IR/Instructions.h;
// the leading "NNNN | " column is the report's line numbering and the
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS line carries the macro expansion inlined
// by the report tool. Code left byte-identical; comments only added.
//
// InsertValueInst models the IR 'insertvalue' instruction: it produces a copy of
// an aggregate (struct/array) with one member replaced. The two operands are the
// aggregate (index 0) and the value to insert (index 1); the constant index path
// is stored out-of-line in the Indices SmallVector, not as operands.
2391 | class InsertValueInst : public Instruction {
2392 | SmallVector<unsigned, 4> Indices;
2393 | |
2394 | InsertValueInst(const InsertValueInst &IVI);
2395 | |
2396 | /// Constructors - Create a insertvalue instruction with a base aggregate
2397 | /// value, a value to insert, and a list of indices. The first ctor can
2398 | /// optionally insert before an existing instruction, the second appends
2399 | /// the new instruction to the specified BasicBlock.
2400 | inline InsertValueInst(Value *Agg, Value *Val,
2401 | ArrayRef<unsigned> Idxs,
2402 | const Twine &NameStr,
2403 | Instruction *InsertBefore);
2404 | inline InsertValueInst(Value *Agg, Value *Val,
2405 | ArrayRef<unsigned> Idxs,
2406 | const Twine &NameStr, BasicBlock *InsertAtEnd);
2407 | |
2408 | /// Constructors - These two constructors are convenience methods because one
2409 | /// and two index insertvalue instructions are so common.
2410 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2411 | const Twine &NameStr = "",
2412 | Instruction *InsertBefore = nullptr);
2413 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2414 | BasicBlock *InsertAtEnd);
2415 | |
2416 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2417 | const Twine &NameStr);
2418 | |
2419 | protected:
2420 | // Note: Instruction needs to be a friend here to call cloneImpl.
2421 | friend class Instruction;
2422 | |
2423 | InsertValueInst *cloneImpl() const;
2424 | |
2425 | public:
2426 | // allocate space for exactly two operands
2427 | void *operator new(size_t s) {
2428 | return User::operator new(s, 2);
2429 | }
2430 | |
2431 | static InsertValueInst *Create(Value *Agg, Value *Val,
2432 | ArrayRef<unsigned> Idxs,
2433 | const Twine &NameStr = "",
2434 | Instruction *InsertBefore = nullptr) {
2435 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2436 | }
2437 | |
2438 | static InsertValueInst *Create(Value *Agg, Value *Val,
2439 | ArrayRef<unsigned> Idxs,
2440 | const Twine &NameStr,
2441 | BasicBlock *InsertAtEnd) {
2442 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2443 | }
2444 | |
2445 | /// Transparently provide more efficient getOperand methods.
2446 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;
2447 | |
2448 | using idx_iterator = const unsigned*;
2449 | |
2450 | inline idx_iterator idx_begin() const { return Indices.begin(); }
2451 | inline idx_iterator idx_end() const { return Indices.end(); }
2452 | inline iterator_range<idx_iterator> indices() const {
2453 | return make_range(idx_begin(), idx_end());
2454 | }
2455 | |
2456 | Value *getAggregateOperand() {
2457 | return getOperand(0);
2458 | }
2459 | const Value *getAggregateOperand() const {
2460 | return getOperand(0);
2461 | }
2462 | static unsigned getAggregateOperandIndex() {
2463 | return 0U; // get index for modifying correct operand
2464 | }
2465 | |
2466 | Value *getInsertedValueOperand() {
2467 | return getOperand(1);
2468 | }
2469 | const Value *getInsertedValueOperand() const {
2470 | return getOperand(1);
2471 | }
2472 | static unsigned getInsertedValueOperandIndex() {
2473 | return 1U; // get index for modifying correct operand
2474 | }
2475 | |
2476 | ArrayRef<unsigned> getIndices() const {
2477 | return Indices;
2478 | }
2479 | |
2480 | unsigned getNumIndices() const {
2481 | return (unsigned)Indices.size();
2482 | }
2483 | |
// hasIndices() is unconditionally true: an insertvalue always carries at
// least one index (cf. the one/two-index convenience ctors above).
2484 | bool hasIndices() const {
2485 | return true;
2486 | }
2487 | |
2488 | // Methods for support type inquiry through isa, cast, and dyn_cast:
2489 | static bool classof(const Instruction *I) {
2490 | return I->getOpcode() == Instruction::InsertValue;
2491 | }
2492 | static bool classof(const Value *V) {
2493 | return isa<Instruction>(V) && classof(cast<Instruction>(V));
2494 | }
2495 | };
2496 | |
// OperandTraits specialization: InsertValueInst has a fixed operand count of 2
// (aggregate + inserted value), matching the `User::operator new(s, 2)` above.
2497 | template <>
2498 | struct OperandTraits<InsertValueInst> :
2499 | public FixedNumOperandTraits<InsertValueInst, 2> {
2500 | };
2501 | |
// Out-of-line ctor (insert-before-instruction form). The result type of an
// insertvalue is the aggregate's own type, hence Agg->getType() — note Agg is
// dereferenced here with no null check; callers must pass a valid aggregate.
2502 | InsertValueInst::InsertValueInst(Value *Agg,
2503 | Value *Val,
2504 | ArrayRef<unsigned> Idxs,
2505 | const Twine &NameStr,
2506 | Instruction *InsertBefore)
2507 | : Instruction(Agg->getType(), InsertValue,
2508 | OperandTraits<InsertValueInst>::op_begin(this),
2509 | 2, InsertBefore) {
2510 | init(Agg, Val, Idxs, NameStr);
2511 | }
2512 | |
// Out-of-line ctor (append-to-basic-block form); identical to the ctor above
// except the instruction is appended to InsertAtEnd instead of inserted before
// an existing instruction.
2513 | InsertValueInst::InsertValueInst(Value *Agg,
2514 | Value *Val,
2515 | ArrayRef<unsigned> Idxs,
2516 | const Twine &NameStr,
2517 | BasicBlock *InsertAtEnd)
2518 | : Instruction(Agg->getType(), InsertValue,
2519 | OperandTraits<InsertValueInst>::op_begin(this),
2520 | 2, InsertAtEnd) {
2521 | init(Agg, Val, Idxs, NameStr);
2522 | }
2523 | |
// NOTE(review): macro invocation with its expansion inlined by the analyzer
// report; in the real header this single DEFINE_TRANSPARENT_OPERAND_ACCESSORS
// line generates the range-checked getOperand/setOperand/op_begin/op_end
// bodies declared by the DECLARE_ macro inside the class.
2524 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst ::const_op_iterator InsertValueInst::op_begin() const { return OperandTraits<InsertValueInst>::op_begin(const_cast< InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst ::op_end() { return OperandTraits<InsertValueInst>::op_end (this); } InsertValueInst::const_op_iterator InsertValueInst:: op_end() const { return OperandTraits<InsertValueInst>:: op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<InsertValueInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2524, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<InsertValueInst>::op_begin(const_cast< InsertValueInst*>(this))[i_nocapture].get()); } void InsertValueInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<InsertValueInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2524, __PRETTY_FUNCTION__)); OperandTraits<InsertValueInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned InsertValueInst::getNumOperands() const { return OperandTraits <InsertValueInst>::operands(this); } template <int Idx_nocapture > Use &InsertValueInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &InsertValueInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); }
2525 | |
2526 | //===----------------------------------------------------------------------===// |
2527 | // PHINode Class |
2528 | //===----------------------------------------------------------------------===// |
2529 | |
2530 | // PHINode - The PHINode class is used to represent the magical mystical PHI |
2531 | // node, that can not exist in nature, but can be synthesized in a computer |
2532 | // scientist's overactive imagination. |
2533 | // |
// NOTE(review): analyzer-report listing; "NNNN | " is the report's line column
// and assert()/macro lines carry their preprocessor expansion inlined. Code
// left byte-identical; comments only added.
//
// PHINode models the IR 'phi' instruction. Incoming values are ordinary
// (hung-off) operands; incoming blocks are stored in a parallel array placed
// after the operand list in the same allocation (see allocHungoffUses below),
// so value i pairs with block i.
2534 | class PHINode : public Instruction {
2535 | /// The number of operands actually allocated. NumOperands is
2536 | /// the number actually in use.
2537 | unsigned ReservedSpace;
2538 | |
2539 | PHINode(const PHINode &PN);
2540 | |
2541 | explicit PHINode(Type *Ty, unsigned NumReservedValues,
2542 | const Twine &NameStr = "",
2543 | Instruction *InsertBefore = nullptr)
2544 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2545 | ReservedSpace(NumReservedValues) {
2546 | setName(NameStr);
2547 | allocHungoffUses(ReservedSpace);
2548 | }
2549 | |
2550 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2551 | BasicBlock *InsertAtEnd)
2552 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2553 | ReservedSpace(NumReservedValues) {
2554 | setName(NameStr);
2555 | allocHungoffUses(ReservedSpace);
2556 | }
2557 | |
2558 | protected:
2559 | // Note: Instruction needs to be a friend here to call cloneImpl.
2560 | friend class Instruction;
2561 | |
2562 | PHINode *cloneImpl() const;
2563 | |
2564 | // allocHungoffUses - this is more complicated than the generic
2565 | // User::allocHungoffUses, because we have to allocate Uses for the incoming
2566 | // values and pointers to the incoming blocks, all in one allocation.
2567 | void allocHungoffUses(unsigned N) {
2568 | User::allocHungoffUses(N, /* IsPhi */ true);
2569 | }
2570 | |
2571 | public:
2572 | /// Constructors - NumReservedValues is a hint for the number of incoming
2573 | /// edges that this phi node will have (use 0 if you really have no idea).
2574 | static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2575 | const Twine &NameStr = "",
2576 | Instruction *InsertBefore = nullptr) {
2577 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2578 | }
2579 | |
2580 | static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2581 | const Twine &NameStr, BasicBlock *InsertAtEnd) {
2582 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2583 | }
2584 | |
2585 | /// Provide fast operand accessors
2586 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;
2587 | |
2588 | // Block iterator interface. This provides access to the list of incoming
2589 | // basic blocks, which parallels the list of incoming values.
2590 | |
2591 | using block_iterator = BasicBlock **;
2592 | using const_block_iterator = BasicBlock * const *;
2593 | |
// The incoming-block array lives after the ReservedSpace Use slots (one joint
// allocation, see allocHungoffUses); the "+ 1" skips the Use::UserRef record
// that sits between the Uses and the block pointers.
2594 | block_iterator block_begin() {
2595 | Use::UserRef *ref =
2596 | reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
2597 | return reinterpret_cast<block_iterator>(ref + 1);
2598 | }
2599 | |
2600 | const_block_iterator block_begin() const {
2601 | const Use::UserRef *ref =
2602 | reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
2603 | return reinterpret_cast<const_block_iterator>(ref + 1);
2604 | }
2605 | |
2606 | block_iterator block_end() {
2607 | return block_begin() + getNumOperands();
2608 | }
2609 | |
2610 | const_block_iterator block_end() const {
2611 | return block_begin() + getNumOperands();
2612 | }
2613 | |
2614 | iterator_range<block_iterator> blocks() {
2615 | return make_range(block_begin(), block_end());
2616 | }
2617 | |
2618 | iterator_range<const_block_iterator> blocks() const {
2619 | return make_range(block_begin(), block_end());
2620 | }
2621 | |
2622 | op_range incoming_values() { return operands(); }
2623 | |
2624 | const_op_range incoming_values() const { return operands(); }
2625 | |
2626 | /// Return the number of incoming edges
2627 | ///
2628 | unsigned getNumIncomingValues() const { return getNumOperands(); }
2629 | |
2630 | /// Return incoming value number x
2631 | ///
2632 | Value *getIncomingValue(unsigned i) const {
2633 | return getOperand(i);
2634 | }
2635 | void setIncomingValue(unsigned i, Value *V) {
2636 | assert(V && "PHI node got a null value!")((V && "PHI node got a null value!") ? static_cast< void> (0) : __assert_fail ("V && \"PHI node got a null value!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2636, __PRETTY_FUNCTION__));
2637 | assert(getType() == V->getType() &&((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2638, __PRETTY_FUNCTION__))
2638 | "All operands to PHI node must be the same type as the PHI node!")((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2638, __PRETTY_FUNCTION__));
2639 | setOperand(i, V);
2640 | }
2641 | |
2642 | static unsigned getOperandNumForIncomingValue(unsigned i) {
2643 | return i;
2644 | }
2645 | |
2646 | static unsigned getIncomingValueNumForOperand(unsigned i) {
2647 | return i;
2648 | }
2649 | |
2650 | /// Return incoming basic block number @p i.
2651 | ///
2652 | BasicBlock *getIncomingBlock(unsigned i) const {
2653 | return block_begin()[i];
2654 | }
2655 | |
2656 | /// Return incoming basic block corresponding
2657 | /// to an operand of the PHI.
2658 | ///
2659 | BasicBlock *getIncomingBlock(const Use &U) const {
2660 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")((this == U.getUser() && "Iterator doesn't point to PHI's Uses?" ) ? static_cast<void> (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2660, __PRETTY_FUNCTION__));
2661 | return getIncomingBlock(unsigned(&U - op_begin()));
2662 | }
2663 | |
2664 | /// Return incoming basic block corresponding
2665 | /// to value use iterator.
2666 | ///
2667 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2668 | return getIncomingBlock(I.getUse());
2669 | }
2670 | |
2671 | void setIncomingBlock(unsigned i, BasicBlock *BB) {
2672 | assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast <void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2672, __PRETTY_FUNCTION__));
2673 | block_begin()[i] = BB;
2674 | }
2675 | |
2676 | /// Replace every incoming basic block \p Old to basic block \p New.
2677 | void replaceIncomingBlockWith(BasicBlock *Old, BasicBlock *New) {
2678 | assert(New && Old && "PHI node got a null basic block!")((New && Old && "PHI node got a null basic block!" ) ? static_cast<void> (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2678, __PRETTY_FUNCTION__));
2679 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2680 | if (getIncomingBlock(Op) == Old)
2681 | setIncomingBlock(Op, New);
2682 | }
2683 | |
2684 | /// Add an incoming value to the end of the PHI list
2685 | ///
2686 | void addIncoming(Value *V, BasicBlock *BB) {
2687 | if (getNumOperands() == ReservedSpace)
2688 | growOperands(); // Get more space!
2689 | // Initialize some new operands.
2690 | setNumHungOffUseOperands(getNumOperands() + 1);
2691 | setIncomingValue(getNumOperands() - 1, V);
2692 | setIncomingBlock(getNumOperands() - 1, BB);
2693 | }
2694 | |
2695 | /// Remove an incoming value. This is useful if a
2696 | /// predecessor basic block is deleted. The value removed is returned.
2697 | ///
2698 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2699 | /// is true), the PHI node is destroyed and any uses of it are replaced with
2700 | /// dummy values. The only time there should be zero incoming values to a PHI
2701 | /// node is when the block is dead, so this strategy is sound.
2702 | ///
2703 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2704 | |
2705 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2706 | int Idx = getBasicBlockIndex(BB);
2707 | assert(Idx >= 0 && "Invalid basic block argument to remove!")((Idx >= 0 && "Invalid basic block argument to remove!" ) ? static_cast<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2707, __PRETTY_FUNCTION__));
2708 | return removeIncomingValue(Idx, DeletePHIIfEmpty);
2709 | }
2710 | |
2711 | /// Return the first index of the specified basic
2712 | /// block in the value list for this PHI. Returns -1 if no instance.
2713 | ///
2714 | int getBasicBlockIndex(const BasicBlock *BB) const {
2715 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2716 | if (block_begin()[i] == BB)
2717 | return i;
2718 | return -1;
2719 | }
2720 | |
2721 | Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2722 | int Idx = getBasicBlockIndex(BB);
2723 | assert(Idx >= 0 && "Invalid basic block argument!")((Idx >= 0 && "Invalid basic block argument!") ? static_cast <void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2723, __PRETTY_FUNCTION__));
2724 | return getIncomingValue(Idx);
2725 | }
2726 | |
2727 | /// If the specified PHI node always merges together the
2728 | /// same value, return the value, otherwise return null.
2729 | Value *hasConstantValue() const;
2730 | |
2731 | /// Whether the specified PHI node always merges
2732 | /// together the same value, assuming undefs are equal to a unique
2733 | /// non-undef value.
2734 | bool hasConstantOrUndefValue() const;
2735 | |
2736 | /// Methods for support type inquiry through isa, cast, and dyn_cast:
2737 | static bool classof(const Instruction *I) {
2738 | return I->getOpcode() == Instruction::PHI;
2739 | }
2740 | static bool classof(const Value *V) {
2741 | return isa<Instruction>(V) && classof(cast<Instruction>(V));
2742 | }
2743 | |
2744 | private:
2745 | void growOperands();
2746 | };
2747 | |
// OperandTraits specialization: PHINode uses hung-off (externally allocated,
// growable) operand storage rather than a fixed inline operand count.
2748 | template <>
2749 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2750 | };
2751 | |
// NOTE(review): macro invocation with its expansion inlined by the analyzer
// report; generates PHINode's range-checked getOperand/setOperand/op_begin/
// op_end bodies declared by the DECLARE_ macro inside the class.
2752 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits <PHINode>::op_begin(this); } PHINode::const_op_iterator PHINode::op_begin() const { return OperandTraits<PHINode> ::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator PHINode::op_end() { return OperandTraits<PHINode>::op_end (this); } PHINode::const_op_iterator PHINode::op_end() const { return OperandTraits<PHINode>::op_end(const_cast<PHINode *>(this)); } Value *PHINode::getOperand(unsigned i_nocapture ) const { ((i_nocapture < OperandTraits<PHINode>::operands (this) && "getOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2752, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<PHINode>::op_begin(const_cast<PHINode *>(this))[i_nocapture].get()); } void PHINode::setOperand( unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<PHINode>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2752, __PRETTY_FUNCTION__)); OperandTraits<PHINode>:: op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode ::getNumOperands() const { return OperandTraits<PHINode> ::operands(this); } template <int Idx_nocapture> Use & PHINode::Op() { return this->OpFrom<Idx_nocapture>(this ); } template <int Idx_nocapture> const Use &PHINode ::Op() const { return this->OpFrom<Idx_nocapture>(this ); }
2753 | |
2754 | //===----------------------------------------------------------------------===// |
2755 | // LandingPadInst Class |
2756 | //===----------------------------------------------------------------------===// |
2757 | |
2758 | //===--------------------------------------------------------------------------- |
2759 | /// The landingpad instruction holds all of the information |
2760 | /// necessary to generate correct exception handling. The landingpad instruction |
2761 | /// cannot be moved from the top of a landing pad block, which itself is |
2762 | /// accessible only from the 'unwind' edge of an invoke. This uses the |
2763 | /// SubclassData field in Value to store whether or not the landingpad is a |
2764 | /// cleanup. |
2765 | /// |
// NOTE(review): analyzer-report listing; code left byte-identical, comments
// only added.
//
// LandingPadInst models the IR 'landingpad' instruction. Each operand is one
// catch or filter clause (hung-off, growable storage); the low bit of the
// instruction's subclass data records the cleanup flag.
2766 | class LandingPadInst : public Instruction {
2767 | /// The number of operands actually allocated. NumOperands is
2768 | /// the number actually in use.
2769 | unsigned ReservedSpace;
2770 | |
2771 | LandingPadInst(const LandingPadInst &LP);
2772 | |
2773 | public:
2774 | enum ClauseType { Catch, Filter };
2775 | |
2776 | private:
2777 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2778 | const Twine &NameStr, Instruction *InsertBefore);
2779 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2780 | const Twine &NameStr, BasicBlock *InsertAtEnd);
2781 | |
2782 | // Allocate space for exactly zero operands.
2783 | void *operator new(size_t s) {
2784 | return User::operator new(s);
2785 | }
2786 | |
2787 | void growOperands(unsigned Size);
2788 | void init(unsigned NumReservedValues, const Twine &NameStr);
2789 | |
2790 | protected:
2791 | // Note: Instruction needs to be a friend here to call cloneImpl.
2792 | friend class Instruction;
2793 | |
2794 | LandingPadInst *cloneImpl() const;
2795 | |
2796 | public:
2797 | /// Constructors - NumReservedClauses is a hint for the number of incoming
2798 | /// clauses that this landingpad will have (use 0 if you really have no idea).
2799 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2800 | const Twine &NameStr = "",
2801 | Instruction *InsertBefore = nullptr);
2802 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2803 | const Twine &NameStr, BasicBlock *InsertAtEnd);
2804 | |
2805 | /// Provide fast operand accessors
2806 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;
2807 | |
2808 | /// Return 'true' if this landingpad instruction is a
2809 | /// cleanup. I.e., it should be run when unwinding even if its landing pad
2810 | /// doesn't catch the exception.
2811 | bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
2812 | |
2813 | /// Indicate that this landingpad instruction is a cleanup.
2814 | void setCleanup(bool V) {
2815 | setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
2816 | (V ? 1 : 0));
2817 | }
2818 | |
2819 | /// Add a catch or filter clause to the landing pad.
2820 | void addClause(Constant *ClauseVal);
2821 | |
2822 | /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2823 | /// determine what type of clause this is.
2824 | Constant *getClause(unsigned Idx) const {
2825 | return cast<Constant>(getOperandList()[Idx]);
2826 | }
2827 | |
// Clause kind is distinguished by operand type: filter clauses have array
// type, catch clauses do not (hence the complementary isa<ArrayType> tests).
2828 | /// Return 'true' if the clause and index Idx is a catch clause.
2829 | bool isCatch(unsigned Idx) const {
2830 | return !isa<ArrayType>(getOperandList()[Idx]->getType());
2831 | }
2832 | |
2833 | /// Return 'true' if the clause and index Idx is a filter clause.
2834 | bool isFilter(unsigned Idx) const {
2835 | return isa<ArrayType>(getOperandList()[Idx]->getType());
2836 | }
2837 | |
2838 | /// Get the number of clauses for this landing pad.
2839 | unsigned getNumClauses() const { return getNumOperands(); }
2840 | |
2841 | /// Grow the size of the operand list to accommodate the new
2842 | /// number of clauses.
2843 | void reserveClauses(unsigned Size) { growOperands(Size); }
2844 | |
2845 | // Methods for support type inquiry through isa, cast, and dyn_cast:
2846 | static bool classof(const Instruction *I) {
2847 | return I->getOpcode() == Instruction::LandingPad;
2848 | }
2849 | static bool classof(const Value *V) {
2850 | return isa<Instruction>(V) && classof(cast<Instruction>(V));
2851 | }
2852 | };
2853 | |
// OperandTraits specialization: LandingPadInst uses hung-off operand storage
// so the clause list can grow (see growOperands/reserveClauses above).
2854 | template <>
2855 | struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2856 | };
2857 | |
// NOTE(review): macro invocation with its expansion inlined by the analyzer
// report; generates LandingPadInst's range-checked operand accessor bodies.
2858 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst ::const_op_iterator LandingPadInst::op_begin() const { return OperandTraits<LandingPadInst>::op_begin(const_cast< LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst ::op_end() { return OperandTraits<LandingPadInst>::op_end (this); } LandingPadInst::const_op_iterator LandingPadInst::op_end () const { return OperandTraits<LandingPadInst>::op_end (const_cast<LandingPadInst*>(this)); } Value *LandingPadInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<LandingPadInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2858, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<LandingPadInst>::op_begin(const_cast< LandingPadInst*>(this))[i_nocapture].get()); } void LandingPadInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<LandingPadInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2858, __PRETTY_FUNCTION__)); OperandTraits<LandingPadInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits <LandingPadInst>::operands(this); } template <int Idx_nocapture > Use &LandingPadInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &LandingPadInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); }
2859 | |
2860 | //===----------------------------------------------------------------------===// |
2861 | // ReturnInst Class |
2862 | //===----------------------------------------------------------------------===// |
2863 | |
2864 | //===--------------------------------------------------------------------------- |
2865 | /// Return a value (possibly void), from a function. Execution |
2866 | /// does not continue in this function any longer. |
2867 | /// |
// NOTE(review): analyzer-report listing; code left byte-identical, comments
// only added.
//
// ReturnInst models the IR 'ret' instruction: zero operands for 'ret void',
// one operand for 'ret X'. The !!retVal placement-new argument below sizes the
// operand allocation accordingly (0 or 1).
2868 | class ReturnInst : public Instruction {
2869 | ReturnInst(const ReturnInst &RI);
2870 | |
2871 | private:
2872 | // ReturnInst constructors:
2873 | // ReturnInst() - 'ret void' instruction
2874 | // ReturnInst( null) - 'ret void' instruction
2875 | // ReturnInst(Value* X) - 'ret X' instruction
2876 | // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2877 | // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2878 | // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2879 | // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2880 | //
2881 | // NOTE: If the Value* passed is of type void then the constructor behaves as
2882 | // if it was passed NULL.
2883 | explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2884 | Instruction *InsertBefore = nullptr);
2885 | ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2886 | explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2887 | |
2888 | protected:
2889 | // Note: Instruction needs to be a friend here to call cloneImpl.
2890 | friend class Instruction;
2891 | |
2892 | ReturnInst *cloneImpl() const;
2893 | |
2894 | public:
2895 | static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2896 | Instruction *InsertBefore = nullptr) {
2897 | return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2898 | }
2899 | |
2900 | static ReturnInst* Create(LLVMContext &C, Value *retVal,
2901 | BasicBlock *InsertAtEnd) {
2902 | return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2903 | }
2904 | |
2905 | static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2906 | return new(0) ReturnInst(C, InsertAtEnd);
2907 | }
2908 | |
2909 | /// Provide fast operand accessors
2910 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;
2911 | |
2912 | /// Convenience accessor. Returns null if there is no return value.
2913 | Value *getReturnValue() const {
2914 | return getNumOperands() != 0 ? getOperand(0) : nullptr;
2915 | }
2916 | |
2917 | unsigned getNumSuccessors() const { return 0; }
2918 | |
2919 | // Methods for support type inquiry through isa, cast, and dyn_cast:
2920 | static bool classof(const Instruction *I) {
2921 | return (I->getOpcode() == Instruction::Ret);
2922 | }
2923 | static bool classof(const Value *V) {
2924 | return isa<Instruction>(V) && classof(cast<Instruction>(V));
2925 | }
2926 | |
// Private successor accessors satisfy the terminator interface while making
// any call through them an immediate hard failure: a return has no successors.
2927 | private:
2928 | BasicBlock *getSuccessor(unsigned idx) const {
2929 | llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2929);
2930 | }
2931 | |
2932 | void setSuccessor(unsigned idx, BasicBlock *B) {
2933 | llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2933);
2934 | }
2935 | };
2936 | |
// OperandTraits specialization: ReturnInst has a variadic operand count
// (zero or one operand, fixed at allocation time by the Create factories).
2937 | template <>
2938 | struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
2939 | };
2940 | |
// NOTE(review): macro invocation with its expansion inlined by the analyzer
// report; generates ReturnInst's range-checked operand accessor bodies.
2941 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits <ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator ReturnInst::op_begin() const { return OperandTraits<ReturnInst >::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst ::op_iterator ReturnInst::op_end() { return OperandTraits< ReturnInst>::op_end(this); } ReturnInst::const_op_iterator ReturnInst::op_end() const { return OperandTraits<ReturnInst >::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ReturnInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2941, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst *>(this))[i_nocapture].get()); } void ReturnInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<ReturnInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 2941, __PRETTY_FUNCTION__)); OperandTraits<ReturnInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ReturnInst ::getNumOperands() const { return OperandTraits<ReturnInst >::operands(this); } template <int Idx_nocapture> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & ReturnInst::Op() const { return this->OpFrom<Idx_nocapture >(this); }
2942 | |
2943 | //===----------------------------------------------------------------------===// |
2944 | // BranchInst Class |
2945 | //===----------------------------------------------------------------------===// |
2946 | |
2947 | //===--------------------------------------------------------------------------- |
2948 | /// Conditional or Unconditional Branch instruction. |
2949 | /// |
2950 | class BranchInst : public Instruction {
2951 | /// Ops list - Branches are strange. The operands are ordered:
2952 | /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
2953 | /// they don't have to check for cond/uncond branchness. These are mostly
2954 | /// accessed relative from op_end().
// All constructors (including the copy constructor) are private; clients go
// through the static Create() factories below, which placement-allocate the
// correct operand count: new(1) for an unconditional branch, new(3) for a
// conditional one.
2955 | BranchInst(const BranchInst &BI);
2956 | // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
2957 | // BranchInst(BB *B) - 'br B'
2958 | // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
2959 | // BranchInst(BB* B, Inst *I) - 'br B' insert before I
2960 | // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
2961 | // BranchInst(BB* B, BB *I) - 'br B' insert at end
2962 | // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
2963 | explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
2964 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
2965 | Instruction *InsertBefore = nullptr);
2966 | BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
2967 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
2968 | BasicBlock *InsertAtEnd);
2969 |
// Sanity-checks the constructed instruction (debug-build helper).
2970 | void AssertOK();
2971 |
2972 | protected:
2973 | // Note: Instruction needs to be a friend here to call cloneImpl.
2974 | friend class Instruction;
2975 |
2976 | BranchInst *cloneImpl() const;
2977 |
2978 | public:
2979 | /// Iterator type that casts an operand to a basic block.
2980 | ///
2981 | /// This only makes sense because the successors are stored as adjacent
2982 | /// operands for branch instructions.
2983 | struct succ_op_iterator
2984 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
2985 | std::random_access_iterator_tag, BasicBlock *,
2986 | ptrdiff_t, BasicBlock *, BasicBlock *> {
2987 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
2988 |
2989 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
2990 | BasicBlock *operator->() const { return operator*(); }
2991 | };
2992 |
2993 | /// The const version of `succ_op_iterator`.
2994 | struct const_succ_op_iterator
2995 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
2996 | std::random_access_iterator_tag,
2997 | const BasicBlock *, ptrdiff_t, const BasicBlock *,
2998 | const BasicBlock *> {
2999 | explicit const_succ_op_iterator(const_value_op_iterator I)
3000 | : iterator_adaptor_base(I) {}
3001 |
3002 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3003 | const BasicBlock *operator->() const { return operator*(); }
3004 | };
3005 |
// Factory for an unconditional branch: exactly one operand (the target).
3006 | static BranchInst *Create(BasicBlock *IfTrue,
3007 | Instruction *InsertBefore = nullptr) {
3008 | return new(1) BranchInst(IfTrue, InsertBefore);
3009 | }
3010 |
// Factory for a conditional branch: three operands [Cond, FalseDest, TrueDest].
3011 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3012 | Value *Cond, Instruction *InsertBefore = nullptr) {
3013 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3014 | }
3015 |
3016 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3017 | return new(1) BranchInst(IfTrue, InsertAtEnd);
3018 | }
3019 |
3020 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3021 | Value *Cond, BasicBlock *InsertAtEnd) {
3022 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3023 | }
3024 |
3025 | /// Transparently provide more efficient getOperand methods.
3026 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;
3027 |
// Conditionality is encoded purely in the operand count: 1 operand means
// unconditional, 3 means conditional.
3028 | bool isUnconditional() const { return getNumOperands() == 1; }
3029 | bool isConditional() const { return getNumOperands() == 3; }
3030 |
// Operands are addressed relative to op_end(): for a conditional branch
// Op<-3>() is the condition, Op<-2>() the false destination, Op<-1>() the
// true destination.
3031 | Value *getCondition() const {
3032 | assert(isConditional() && "Cannot get condition of an uncond branch!")((isConditional() && "Cannot get condition of an uncond branch!" ) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3032, __PRETTY_FUNCTION__));
3033 | return Op<-3>();
3034 | }
3035 |
3036 | void setCondition(Value *V) {
3037 | assert(isConditional() && "Cannot set condition of unconditional branch!")((isConditional() && "Cannot set condition of unconditional branch!" ) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3037, __PRETTY_FUNCTION__));
3038 | Op<-3>() = V;
3039 | }
3040 |
3041 | unsigned getNumSuccessors() const { return 1+isConditional(); }
3042 |
// Successor 0 is the last operand (the true destination); successor 1, when
// present, is the operand before it (the false destination).
3043 | BasicBlock *getSuccessor(unsigned i) const {
3044 | assert(i < getNumSuccessors() && "Successor # out of range for Branch!")((i < getNumSuccessors() && "Successor # out of range for Branch!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() && \"Successor # out of range for Branch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3044, __PRETTY_FUNCTION__));
3045 | return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3046 | }
3047 |
3048 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3049 | assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")((idx < getNumSuccessors() && "Successor # out of range for Branch!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for Branch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3049, __PRETTY_FUNCTION__));
3050 | *(&Op<-1>() - idx) = NewSucc;
3051 | }
3052 |
3053 | /// Swap the successors of this branch instruction.
3054 | ///
3055 | /// Swaps the successors of the branch instruction. This also swaps any
3056 | /// branch weight metadata associated with the instruction so that it
3057 | /// continues to map correctly to each operand.
3058 | void swapSuccessors();
3059 |
// For a conditional branch the first value operand is the condition, so the
// successor range skips one operand before iterating.
3060 | iterator_range<succ_op_iterator> successors() {
3061 | return make_range(
3062 | succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3063 | succ_op_iterator(value_op_end()));
3064 | }
3065 |
3066 | iterator_range<const_succ_op_iterator> successors() const {
3067 | return make_range(const_succ_op_iterator(
3068 | std::next(value_op_begin(), isConditional() ? 1 : 0)),
3069 | const_succ_op_iterator(value_op_end()));
3070 | }
3071 |
3072 | // Methods for support type inquiry through isa, cast, and dyn_cast:
3073 | static bool classof(const Instruction *I) {
3074 | return (I->getOpcode() == Instruction::Br);
3075 | }
3076 | static bool classof(const Value *V) {
3077 | return isa<Instruction>(V) && classof(cast<Instruction>(V));
3078 | }
3079 | };
3080 | |
// BranchInst allocates a variable number of operands (1 for unconditional,
// 3 for conditional — see the Create() factories), hence variadic operand
// traits with a minimum of 1.
3081 | template <>
3082 | struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3083 | };
3084 | |
// Out-of-line transparent operand accessors for BranchInst, produced by the
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS macro (the analyzer report shows the
// macro invocation followed by its full expansion on one line). Generated
// code — do not edit by hand.
3085 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits <BranchInst>::op_begin(this); } BranchInst::const_op_iterator BranchInst::op_begin() const { return OperandTraits<BranchInst >::op_begin(const_cast<BranchInst*>(this)); } BranchInst ::op_iterator BranchInst::op_end() { return OperandTraits< BranchInst>::op_end(this); } BranchInst::const_op_iterator BranchInst::op_end() const { return OperandTraits<BranchInst >::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<BranchInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3085, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<BranchInst>::op_begin(const_cast<BranchInst *>(this))[i_nocapture].get()); } void BranchInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<BranchInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3085, __PRETTY_FUNCTION__)); OperandTraits<BranchInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned BranchInst ::getNumOperands() const { return OperandTraits<BranchInst >::operands(this); } template <int Idx_nocapture> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & BranchInst::Op() const { return this->OpFrom<Idx_nocapture >(this); }
3086 | |
3087 | //===----------------------------------------------------------------------===// |
3088 | // SwitchInst Class |
3089 | //===----------------------------------------------------------------------===// |
3090 | |
3091 | //===--------------------------------------------------------------------------- |
3092 | /// Multiway switch |
3093 | /// |
3094 | class SwitchInst : public Instruction {
// Number of operand slots currently allocated (may exceed getNumOperands()
// so addCase() can grow without reallocating every time — see growOperands).
3095 | unsigned ReservedSpace;
3096 |
3097 | // Operand[0] = Value to switch on
3098 | // Operand[1] = Default basic block destination
3099 | // Operand[2n ] = Value to match
3100 | // Operand[2n+1] = BasicBlock to go to on match
3101 | SwitchInst(const SwitchInst &SI);
3102 |
3103 | /// Create a new switch instruction, specifying a value to switch on and a
3104 | /// default destination. The number of additional cases can be specified here
3105 | /// to make memory allocation more efficient. This constructor can also
3106 | /// auto-insert before another instruction.
3107 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3108 | Instruction *InsertBefore);
3109 |
3110 | /// Create a new switch instruction, specifying a value to switch on and a
3111 | /// default destination. The number of additional cases can be specified here
3112 | /// to make memory allocation more efficient. This constructor also
3113 | /// auto-inserts at the end of the specified BasicBlock.
3114 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3115 | BasicBlock *InsertAtEnd);
3116 |
3117 | // allocate space for exactly zero operands
// Operands are "hung off" (allocated separately) so the case list can grow;
// see OperandTraits<SwitchInst> below.
3118 | void *operator new(size_t s) {
3119 | return User::operator new(s);
3120 | }
3121 |
3122 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3123 | void growOperands();
3124 |
3125 | protected:
3126 | // Note: Instruction needs to be a friend here to call cloneImpl.
3127 | friend class Instruction;
3128 |
3129 | SwitchInst *cloneImpl() const;
3130 |
3131 | public:
3132 | // -2
// Sentinel case index that denotes the default destination in the case
// iterator/handle machinery below.
3133 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3134 |
3135 | template <typename CaseHandleT> class CaseIteratorImpl;
3136 |
3137 | /// A handle to a particular switch case. It exposes a convenient interface
3138 | /// to both the case value and the successor block.
3139 | ///
3140 | /// We define this as a template and instantiate it to form both a const and
3141 | /// non-const handle.
3142 | template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3143 | class CaseHandleImpl {
3144 | // Directly befriend both const and non-const iterators.
3145 | friend class SwitchInst::CaseIteratorImpl<
3146 | CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3147 |
3148 | protected:
3149 | // Expose the switch type we're parameterized with to the iterator.
3150 | using SwitchInstType = SwitchInstT;
3151 |
// Non-owning pointer to the switch plus the case number this handle
// refers to (or DefaultPseudoIndex for the default destination).
3152 | SwitchInstT *SI;
3153 | ptrdiff_t Index;
3154 |
3155 | CaseHandleImpl() = default;
3156 | CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3157 |
3158 | public:
3159 | /// Resolves case value for current case.
// Case values live at even operand slots starting at 2 (see the operand
// layout comment on the class).
3160 | ConstantIntT *getCaseValue() const {
3161 | assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3162, __PRETTY_FUNCTION__))
3162 | "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3162, __PRETTY_FUNCTION__));
3163 | return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3164 | }
3165 |
3166 | /// Resolves successor for current case.
3167 | BasicBlockT *getCaseSuccessor() const {
3168 | assert(((unsigned)Index < SI->getNumCases() ||((((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3170, __PRETTY_FUNCTION__))
3169 | (unsigned)Index == DefaultPseudoIndex) &&((((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3170, __PRETTY_FUNCTION__))
3170 | "Index out the number of cases.")((((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3170, __PRETTY_FUNCTION__));
3171 | return SI->getSuccessor(getSuccessorIndex());
3172 | }
3173 |
3174 | /// Returns number of current case.
3175 | unsigned getCaseIndex() const { return Index; }
3176 |
3177 | /// Returns successor index for current case successor.
// Mapping: the default destination is successor 0; case N maps to
// successor N+1.
3178 | unsigned getSuccessorIndex() const {
3179 | assert(((unsigned)Index == DefaultPseudoIndex ||((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3181, __PRETTY_FUNCTION__))
3180 | (unsigned)Index < SI->getNumCases()) &&((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3181, __PRETTY_FUNCTION__))
3181 | "Index out the number of cases.")((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3181, __PRETTY_FUNCTION__));
3182 | return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3183 | }
3184 |
3185 | bool operator==(const CaseHandleImpl &RHS) const {
3186 | assert(SI == RHS.SI && "Incompatible operators.")((SI == RHS.SI && "Incompatible operators.") ? static_cast <void> (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3186, __PRETTY_FUNCTION__));
3187 | return Index == RHS.Index;
3188 | }
3189 | };
3190 |
3191 | using ConstCaseHandle =
3192 | CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3193 |
3194 | class CaseHandle
3195 | : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3196 | friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3197 |
3198 | public:
3199 | CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3200 |
3201 | /// Sets the new value for current case.
3202 | void setValue(ConstantInt *V) {
3203 | assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3204, __PRETTY_FUNCTION__))
3204 | "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3204, __PRETTY_FUNCTION__));
3205 | SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3206 | }
3207 |
3208 | /// Sets the new successor for current case.
3209 | void setSuccessor(BasicBlock *S) {
3210 | SI->setSuccessor(getSuccessorIndex(), S);
3211 | }
3212 | };
3213 |
3214 | template <typename CaseHandleT>
3215 | class CaseIteratorImpl
3216 | : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3217 | std::random_access_iterator_tag,
3218 | CaseHandleT> {
3219 | using SwitchInstT = typename CaseHandleT::SwitchInstType;
3220 |
3221 | CaseHandleT Case;
3222 |
3223 | public:
3224 | /// Default constructed iterator is in an invalid state until assigned to
3225 | /// a case for a particular switch.
3226 | CaseIteratorImpl() = default;
3227 |
3228 | /// Initializes case iterator for given SwitchInst and for given
3229 | /// case number.
3230 | CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3231 |
3232 | /// Initializes case iterator for given SwitchInst and for given
3233 | /// successor index.
// Inverse of CaseHandleImpl::getSuccessorIndex(): successor 0 is the
// default, successor N (N > 0) is case N-1.
3234 | static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3235 | unsigned SuccessorIndex) {
3236 | assert(SuccessorIndex < SI->getNumSuccessors() &&((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!" ) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3237, __PRETTY_FUNCTION__))
3237 | "Successor index # out of range!")((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!" ) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3237, __PRETTY_FUNCTION__));
3238 | return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3239 | : CaseIteratorImpl(SI, DefaultPseudoIndex);
3240 | }
3241 |
3242 | /// Support converting to the const variant. This will be a no-op for const
3243 | /// variant.
3244 | operator CaseIteratorImpl<ConstCaseHandle>() const {
3245 | return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3246 | }
3247 |
3248 | CaseIteratorImpl &operator+=(ptrdiff_t N) {
3249 | // Check index correctness after addition.
3250 | // Note: Index == getNumCases() means end().
3251 | assert(Case.Index + N >= 0 &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3253, __PRETTY_FUNCTION__))
3252 | (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3253, __PRETTY_FUNCTION__))
3253 | "Case.Index out the number of cases.")((Case.Index + N >= 0 && (unsigned)(Case.Index + N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3253, __PRETTY_FUNCTION__));
3254 | Case.Index += N;
3255 | return *this;
3256 | }
3257 | CaseIteratorImpl &operator-=(ptrdiff_t N) {
3258 | // Check index correctness after subtraction.
3259 | // Note: Case.Index == getNumCases() means end().
3260 | assert(Case.Index - N >= 0 &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3262, __PRETTY_FUNCTION__))
3261 | (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3262, __PRETTY_FUNCTION__))
3262 | "Case.Index out the number of cases.")((Case.Index - N >= 0 && (unsigned)(Case.Index - N ) <= Case.SI->getNumCases() && "Case.Index out the number of cases." ) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3262, __PRETTY_FUNCTION__));
3263 | Case.Index -= N;
3264 | return *this;
3265 | }
3266 | ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3267 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators." ) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3267, __PRETTY_FUNCTION__));
3268 | return Case.Index - RHS.Case.Index;
3269 | }
3270 | bool operator==(const CaseIteratorImpl &RHS) const {
3271 | return Case == RHS.Case;
3272 | }
3273 | bool operator<(const CaseIteratorImpl &RHS) const {
3274 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators." ) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3274, __PRETTY_FUNCTION__));
3275 | return Case.Index < RHS.Case.Index;
3276 | }
3277 | CaseHandleT &operator*() { return Case; }
3278 | const CaseHandleT &operator*() const { return Case; }
3279 | };
3280 |
3281 | using CaseIt = CaseIteratorImpl<CaseHandle>;
3282 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3283 |
3284 | static SwitchInst *Create(Value *Value, BasicBlock *Default,
3285 | unsigned NumCases,
3286 | Instruction *InsertBefore = nullptr) {
3287 | return new SwitchInst(Value, Default, NumCases, InsertBefore);
3288 | }
3289 |
3290 | static SwitchInst *Create(Value *Value, BasicBlock *Default,
3291 | unsigned NumCases, BasicBlock *InsertAtEnd) {
3292 | return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3293 | }
3294 |
3295 | /// Provide fast operand accessors
3296 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;
3297 |
3298 | // Accessor Methods for Switch stmt
3299 | Value *getCondition() const { return getOperand(0); }
3300 | void setCondition(Value *V) { setOperand(0, V); }
3301 |
3302 | BasicBlock *getDefaultDest() const {
3303 | return cast<BasicBlock>(getOperand(1));
3304 | }
3305 |
3306 | void setDefaultDest(BasicBlock *DefaultCase) {
3307 | setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3308 | }
3309 |
3310 | /// Return the number of 'cases' in this switch instruction, excluding the
3311 | /// default case.
// The operand list is [Cond, DefaultDest] followed by one (value, successor)
// pair per case, so operands/2 - 1 yields the case count.
3312 | unsigned getNumCases() const {
3313 | return getNumOperands()/2 - 1;
3314 | }
3315 |
3316 | /// Returns a read/write iterator that points to the first case in the
3317 | /// SwitchInst.
3318 | CaseIt case_begin() {
3319 | return CaseIt(this, 0);
3320 | }
3321 |
3322 | /// Returns a read-only iterator that points to the first case in the
3323 | /// SwitchInst.
3324 | ConstCaseIt case_begin() const {
3325 | return ConstCaseIt(this, 0);
3326 | }
3327 |
3328 | /// Returns a read/write iterator that points one past the last in the
3329 | /// SwitchInst.
3330 | CaseIt case_end() {
3331 | return CaseIt(this, getNumCases());
3332 | }
3333 |
3334 | /// Returns a read-only iterator that points one past the last in the
3335 | /// SwitchInst.
3336 | ConstCaseIt case_end() const {
3337 | return ConstCaseIt(this, getNumCases());
3338 | }
3339 |
3340 | /// Iteration adapter for range-for loops.
3341 | iterator_range<CaseIt> cases() {
3342 | return make_range(case_begin(), case_end());
3343 | }
3344 |
3345 | /// Constant iteration adapter for range-for loops.
3346 | iterator_range<ConstCaseIt> cases() const {
3347 | return make_range(case_begin(), case_end());
3348 | }
3349 |
3350 | /// Returns an iterator that points to the default case.
3351 | /// Note: this iterator allows to resolve successor only. Attempt
3352 | /// to resolve case value causes an assertion.
3353 | /// Also note, that increment and decrement also causes an assertion and
3354 | /// makes iterator invalid.
3355 | CaseIt case_default() {
3356 | return CaseIt(this, DefaultPseudoIndex);
3357 | }
3358 | ConstCaseIt case_default() const {
3359 | return ConstCaseIt(this, DefaultPseudoIndex);
3360 | }
3361 |
3362 | /// Search all of the case values for the specified constant. If it is
3363 | /// explicitly handled, return the case iterator of it, otherwise return
3364 | /// default case iterator to indicate that it is handled by the default
3365 | /// handler.
// Linear scan over the cases; callers that miss get case_default(), never
// case_end().
3366 | CaseIt findCaseValue(const ConstantInt *C) {
3367 | CaseIt I = llvm::find_if(
3368 | cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3369 | if (I != case_end())
3370 | return I;
3371 |
3372 | return case_default();
3373 | }
3374 | ConstCaseIt findCaseValue(const ConstantInt *C) const {
3375 | ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3376 | return Case.getCaseValue() == C;
3377 | });
3378 | if (I != case_end())
3379 | return I;
3380 |
3381 | return case_default();
3382 | }
3383 |
3384 | /// Finds the unique case value for a given successor. Returns null if the
3385 | /// successor is not found, not unique, or is the default case.
3386 | ConstantInt *findCaseDest(BasicBlock *BB) {
3387 | if (BB == getDefaultDest())
3388 | return nullptr;
3389 |
3390 | ConstantInt *CI = nullptr;
3391 | for (auto Case : cases()) {
3392 | if (Case.getCaseSuccessor() != BB)
3393 | continue;
3394 |
3395 | if (CI)
3396 | return nullptr; // Multiple cases lead to BB.
3397 |
3398 | CI = Case.getCaseValue();
3399 | }
3400 |
3401 | return CI;
3402 | }
3403 |
3404 | /// Add an entry to the switch instruction.
3405 | /// Note:
3406 | /// This action invalidates case_end(). Old case_end() iterator will
3407 | /// point to the added case.
3408 | void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3409 |
3410 | /// This method removes the specified case and its successor from the switch
3411 | /// instruction. Note that this operation may reorder the remaining cases at
3412 | /// index idx and above.
3413 | /// Note:
3414 | /// This action invalidates iterators for all cases following the one removed,
3415 | /// including the case_end() iterator. It returns an iterator for the next
3416 | /// case.
3417 | CaseIt removeCase(CaseIt I);
3418 |
// Successors are the odd-numbered operands: successor 0 is the default
// destination (operand 1), successor N is the destination of case N-1.
3419 | unsigned getNumSuccessors() const { return getNumOperands()/2; }
3420 | BasicBlock *getSuccessor(unsigned idx) const {
3421 | assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")((idx < getNumSuccessors() &&"Successor idx out of range for switch!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3421, __PRETTY_FUNCTION__));
3422 | return cast<BasicBlock>(getOperand(idx*2+1));
3423 | }
3424 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3425 | assert(idx < getNumSuccessors() && "Successor # out of range for switch!")((idx < getNumSuccessors() && "Successor # out of range for switch!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for switch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3425, __PRETTY_FUNCTION__));
3426 | setOperand(idx * 2 + 1, NewSucc);
3427 | }
3428 |
3429 | // Methods for support type inquiry through isa, cast, and dyn_cast:
3430 | static bool classof(const Instruction *I) {
3431 | return I->getOpcode() == Instruction::Switch;
3432 | }
3433 | static bool classof(const Value *V) {
3434 | return isa<Instruction>(V) && classof(cast<Instruction>(V));
3435 | }
3436 | };
3437 | |
3438 | /// A wrapper class to simplify modification of SwitchInst cases along with |
3439 | /// their prof branch_weights metadata. |
3440 | class SwitchInstProfUpdateWrapper {
// Wrapped instruction and the branch weights parsed from its !prof
// metadata (None when no usable weights are present).
3441 | SwitchInst &SI;
3442 | Optional<SmallVector<uint32_t, 8> > Weights = None;
3443 |
3444 | // Sticky invalid state is needed to safely ignore operations with prof data
3445 | // in cases where SwitchInstProfUpdateWrapper is created from SwitchInst
3446 | // with inconsistent prof data. TODO: once we fix all prof data
3447 | // inconsistencies we can turn invalid state to assertions.
3448 | enum {
3449 | Invalid,
3450 | Initialized,
3451 | Changed
3452 | } State = Invalid;
3453 |
3454 | protected:
3455 | static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3456 |
3457 | MDNode *buildProfBranchWeightsMD();
3458 |
// Populates Weights/State from SI's existing metadata (defined out of line).
3459 | void init();
3460 |
3461 | public:
3462 | using CaseWeightOpt = Optional<uint32_t>;
// Smart-pointer-like forwarding so the wrapper can be used wherever a
// SwitchInst (or SwitchInst*) is expected.
3463 | SwitchInst *operator->() { return &SI; }
3464 | SwitchInst &operator*() { return SI; }
3465 | operator SwitchInst *() { return &SI; }
3466 |
3467 | SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3468 |
// Writes the (re)built branch_weights metadata back to the instruction,
// but only if the wrapper recorded a modification (State == Changed).
3469 | ~SwitchInstProfUpdateWrapper() {
3470 | if (State == Changed)
3471 | SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3472 | }
3473 |
3474 | /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3475 | /// correspondent branch weight.
3476 | SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3477 |
3478 | /// Delegate the call to the underlying SwitchInst::addCase() and set the
3479 | /// specified branch weight for the added case.
3480 | void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3481 |
3482 | /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3483 | /// this object to not touch the underlying SwitchInst in destructor.
3484 | SymbolTableList<Instruction>::iterator eraseFromParent();
3485 |
3486 | void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3487 | CaseWeightOpt getSuccessorWeight(unsigned idx);
3488 |
3489 | static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3490 | };
3491 | |
3492 | template <> |
3493 | struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { |
3494 | }; |
3495 | |
3496 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits <SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator SwitchInst::op_begin() const { return OperandTraits<SwitchInst >::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst ::op_iterator SwitchInst::op_end() { return OperandTraits< SwitchInst>::op_end(this); } SwitchInst::const_op_iterator SwitchInst::op_end() const { return OperandTraits<SwitchInst >::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<SwitchInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3496, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<SwitchInst>::op_begin(const_cast<SwitchInst *>(this))[i_nocapture].get()); } void SwitchInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<SwitchInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3496, __PRETTY_FUNCTION__)); OperandTraits<SwitchInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SwitchInst ::getNumOperands() const { return OperandTraits<SwitchInst >::operands(this); } template <int Idx_nocapture> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & SwitchInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
3497 | |
3498 | //===----------------------------------------------------------------------===// |
3499 | // IndirectBrInst Class |
3500 | //===----------------------------------------------------------------------===// |
3501 | |
3502 | //===--------------------------------------------------------------------------- |
3503 | /// Indirect Branch Instruction. |
3504 | /// |
3505 | class IndirectBrInst : public Instruction { |
3506 | unsigned ReservedSpace; |
3507 | |
3508 | // Operand[0] = Address to jump to |
3509 | // Operand[n+1] = n-th destination |
3510 | IndirectBrInst(const IndirectBrInst &IBI); |
3511 | |
3512 | /// Create a new indirectbr instruction, specifying an |
3513 | /// Address to jump to. The number of expected destinations can be specified |
3514 | /// here to make memory allocation more efficient. This constructor can also |
3515 | /// autoinsert before another instruction. |
3516 | IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); |
3517 | |
3518 | /// Create a new indirectbr instruction, specifying an |
3519 | /// Address to jump to. The number of expected destinations can be specified |
3520 | /// here to make memory allocation more efficient. This constructor also |
3521 | /// autoinserts at the end of the specified BasicBlock. |
3522 | IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); |
3523 | |
3524 | // allocate space for exactly zero operands |
3525 | void *operator new(size_t s) { |
3526 | return User::operator new(s); |
3527 | } |
3528 | |
3529 | void init(Value *Address, unsigned NumDests); |
3530 | void growOperands(); |
3531 | |
3532 | protected: |
3533 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3534 | friend class Instruction; |
3535 | |
3536 | IndirectBrInst *cloneImpl() const; |
3537 | |
3538 | public: |
3539 | /// Iterator type that casts an operand to a basic block. |
3540 | /// |
3541 | /// This only makes sense because the successors are stored as adjacent |
3542 | /// operands for indirectbr instructions. |
3543 | struct succ_op_iterator |
3544 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3545 | std::random_access_iterator_tag, BasicBlock *, |
3546 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3547 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3548 | |
3549 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3550 | BasicBlock *operator->() const { return operator*(); } |
3551 | }; |
3552 | |
3553 | /// The const version of `succ_op_iterator`. |
3554 | struct const_succ_op_iterator |
3555 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3556 | std::random_access_iterator_tag, |
3557 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3558 | const BasicBlock *> { |
3559 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3560 | : iterator_adaptor_base(I) {} |
3561 | |
3562 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3563 | const BasicBlock *operator->() const { return operator*(); } |
3564 | }; |
3565 | |
3566 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3567 | Instruction *InsertBefore = nullptr) { |
3568 | return new IndirectBrInst(Address, NumDests, InsertBefore); |
3569 | } |
3570 | |
3571 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3572 | BasicBlock *InsertAtEnd) { |
3573 | return new IndirectBrInst(Address, NumDests, InsertAtEnd); |
3574 | } |
3575 | |
3576 | /// Provide fast operand accessors. |
3577 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
3578 | |
3579 | // Accessor Methods for IndirectBrInst instruction. |
3580 | Value *getAddress() { return getOperand(0); } |
3581 | const Value *getAddress() const { return getOperand(0); } |
3582 | void setAddress(Value *V) { setOperand(0, V); } |
3583 | |
3584 | /// return the number of possible destinations in this |
3585 | /// indirectbr instruction. |
3586 | unsigned getNumDestinations() const { return getNumOperands()-1; } |
3587 | |
3588 | /// Return the specified destination. |
3589 | BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } |
3590 | const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } |
3591 | |
3592 | /// Add a destination. |
3593 | /// |
3594 | void addDestination(BasicBlock *Dest); |
3595 | |
3596 | /// This method removes the specified successor from the |
3597 | /// indirectbr instruction. |
3598 | void removeDestination(unsigned i); |
3599 | |
3600 | unsigned getNumSuccessors() const { return getNumOperands()-1; } |
3601 | BasicBlock *getSuccessor(unsigned i) const { |
3602 | return cast<BasicBlock>(getOperand(i+1)); |
3603 | } |
3604 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3605 | setOperand(i + 1, NewSucc); |
3606 | } |
3607 | |
3608 | iterator_range<succ_op_iterator> successors() { |
3609 | return make_range(succ_op_iterator(std::next(value_op_begin())), |
3610 | succ_op_iterator(value_op_end())); |
3611 | } |
3612 | |
3613 | iterator_range<const_succ_op_iterator> successors() const { |
3614 | return make_range(const_succ_op_iterator(std::next(value_op_begin())), |
3615 | const_succ_op_iterator(value_op_end())); |
3616 | } |
3617 | |
3618 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3619 | static bool classof(const Instruction *I) { |
3620 | return I->getOpcode() == Instruction::IndirectBr; |
3621 | } |
3622 | static bool classof(const Value *V) { |
3623 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3624 | } |
3625 | }; |
3626 | |
3627 | template <> |
3628 | struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { |
3629 | }; |
3630 | |
3631 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst ::const_op_iterator IndirectBrInst::op_begin() const { return OperandTraits<IndirectBrInst>::op_begin(const_cast< IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst ::op_end() { return OperandTraits<IndirectBrInst>::op_end (this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end () const { return OperandTraits<IndirectBrInst>::op_end (const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3631, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<IndirectBrInst>::op_begin(const_cast< IndirectBrInst*>(this))[i_nocapture].get()); } void IndirectBrInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<IndirectBrInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3631, __PRETTY_FUNCTION__)); OperandTraits<IndirectBrInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned IndirectBrInst::getNumOperands() const { return OperandTraits <IndirectBrInst>::operands(this); } template <int Idx_nocapture > Use &IndirectBrInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &IndirectBrInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
3632 | |
3633 | //===----------------------------------------------------------------------===// |
3634 | // InvokeInst Class |
3635 | //===----------------------------------------------------------------------===// |
3636 | |
3637 | /// Invoke instruction. The SubclassData field is used to hold the |
3638 | /// calling convention of the call. |
3639 | /// |
3640 | class InvokeInst : public CallBase { |
3641 | /// The number of operands for this call beyond the called function, |
3642 | /// arguments, and operand bundles. |
3643 | static constexpr int NumExtraOperands = 2; |
3644 | |
3645 | /// The index from the end of the operand array to the normal destination. |
3646 | static constexpr int NormalDestOpEndIdx = -3; |
3647 | |
3648 | /// The index from the end of the operand array to the unwind destination. |
3649 | static constexpr int UnwindDestOpEndIdx = -2; |
3650 | |
3651 | InvokeInst(const InvokeInst &BI); |
3652 | |
3653 | /// Construct an InvokeInst given a range of arguments. |
3654 | /// |
3655 | /// Construct an InvokeInst from a range of arguments |
3656 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3657 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3658 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3659 | const Twine &NameStr, Instruction *InsertBefore); |
3660 | |
3661 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3662 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3663 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3664 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3665 | |
3666 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3667 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3668 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3669 | |
3670 | /// Compute the number of operands to allocate. |
3671 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
3672 | // We need one operand for the called function, plus our extra operands and |
3673 | // the input operand counts provided. |
3674 | return 1 + NumExtraOperands + NumArgs + NumBundleInputs; |
3675 | } |
3676 | |
3677 | protected: |
3678 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3679 | friend class Instruction; |
3680 | |
3681 | InvokeInst *cloneImpl() const; |
3682 | |
3683 | public: |
3684 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3685 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3686 | const Twine &NameStr, |
3687 | Instruction *InsertBefore = nullptr) { |
3688 | int NumOperands = ComputeNumOperands(Args.size()); |
3689 | return new (NumOperands) |
3690 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3691 | NameStr, InsertBefore); |
3692 | } |
3693 | |
3694 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3695 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3696 | ArrayRef<OperandBundleDef> Bundles = None, |
3697 | const Twine &NameStr = "", |
3698 | Instruction *InsertBefore = nullptr) { |
3699 | int NumOperands = |
3700 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3701 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3702 | |
3703 | return new (NumOperands, DescriptorBytes) |
3704 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3705 | NameStr, InsertBefore); |
3706 | } |
3707 | |
3708 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3709 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3710 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3711 | int NumOperands = ComputeNumOperands(Args.size()); |
3712 | return new (NumOperands) |
3713 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3714 | NameStr, InsertAtEnd); |
3715 | } |
3716 | |
3717 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3718 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3719 | ArrayRef<OperandBundleDef> Bundles, |
3720 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3721 | int NumOperands = |
3722 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3723 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3724 | |
3725 | return new (NumOperands, DescriptorBytes) |
3726 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3727 | NameStr, InsertAtEnd); |
3728 | } |
3729 | |
3730 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3731 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3732 | const Twine &NameStr, |
3733 | Instruction *InsertBefore = nullptr) { |
3734 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3735 | IfException, Args, None, NameStr, InsertBefore); |
3736 | } |
3737 | |
3738 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3739 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3740 | ArrayRef<OperandBundleDef> Bundles = None, |
3741 | const Twine &NameStr = "", |
3742 | Instruction *InsertBefore = nullptr) { |
3743 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3744 | IfException, Args, Bundles, NameStr, InsertBefore); |
3745 | } |
3746 | |
3747 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3748 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3749 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3750 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3751 | IfException, Args, NameStr, InsertAtEnd); |
3752 | } |
3753 | |
3754 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3755 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3756 | ArrayRef<OperandBundleDef> Bundles, |
3757 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3758 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3759 | IfException, Args, Bundles, NameStr, InsertAtEnd); |
3760 | } |
3761 | |
3762 | // Deprecated [opaque pointer types] |
3763 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3764 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3765 | const Twine &NameStr, |
3766 | Instruction *InsertBefore = nullptr) { |
3767 | return Create(cast<FunctionType>( |
3768 | cast<PointerType>(Func->getType())->getElementType()), |
3769 | Func, IfNormal, IfException, Args, None, NameStr, |
3770 | InsertBefore); |
3771 | } |
3772 | |
3773 | // Deprecated [opaque pointer types] |
3774 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3775 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3776 | ArrayRef<OperandBundleDef> Bundles = None, |
3777 | const Twine &NameStr = "", |
3778 | Instruction *InsertBefore = nullptr) { |
3779 | return Create(cast<FunctionType>( |
3780 | cast<PointerType>(Func->getType())->getElementType()), |
3781 | Func, IfNormal, IfException, Args, Bundles, NameStr, |
3782 | InsertBefore); |
3783 | } |
3784 | |
3785 | // Deprecated [opaque pointer types] |
3786 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3787 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3788 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3789 | return Create(cast<FunctionType>( |
3790 | cast<PointerType>(Func->getType())->getElementType()), |
3791 | Func, IfNormal, IfException, Args, NameStr, InsertAtEnd); |
3792 | } |
3793 | |
3794 | // Deprecated [opaque pointer types] |
3795 | static InvokeInst *Create(Value *Func, BasicBlock *IfNormal, |
3796 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3797 | ArrayRef<OperandBundleDef> Bundles, |
3798 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3799 | return Create(cast<FunctionType>( |
3800 | cast<PointerType>(Func->getType())->getElementType()), |
3801 | Func, IfNormal, IfException, Args, Bundles, NameStr, |
3802 | InsertAtEnd); |
3803 | } |
3804 | |
3805 | /// Create a clone of \p II with a different set of operand bundles and |
3806 | /// insert it before \p InsertPt. |
3807 | /// |
3808 | /// The returned invoke instruction is identical to \p II in every way except |
3809 | /// that the operand bundles for the new instruction are set to the operand |
3810 | /// bundles in \p Bundles. |
3811 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, |
3812 | Instruction *InsertPt = nullptr); |
3813 | |
3814 | /// Determine if the call should not perform indirect branch tracking. |
3815 | bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); } |
3816 | |
3817 | /// Determine if the call cannot unwind. |
3818 | bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); } |
3819 | void setDoesNotThrow() { |
3820 | addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind); |
3821 | } |
3822 | |
3823 | // get*Dest - Return the destination basic blocks... |
3824 | BasicBlock *getNormalDest() const { |
3825 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); |
3826 | } |
3827 | BasicBlock *getUnwindDest() const { |
3828 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); |
3829 | } |
3830 | void setNormalDest(BasicBlock *B) { |
3831 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3832 | } |
3833 | void setUnwindDest(BasicBlock *B) { |
3834 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3835 | } |
3836 | |
3837 | /// Get the landingpad instruction from the landing pad |
3838 | /// block (the unwind destination). |
3839 | LandingPadInst *getLandingPadInst() const; |
3840 | |
3841 | BasicBlock *getSuccessor(unsigned i) const { |
3842 | assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!") ? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3842, __PRETTY_FUNCTION__)); |
3843 | return i == 0 ? getNormalDest() : getUnwindDest(); |
3844 | } |
3845 | |
3846 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3847 | assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!") ? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 3847, __PRETTY_FUNCTION__)); |
3848 | if (i == 0) |
3849 | setNormalDest(NewSucc); |
3850 | else |
3851 | setUnwindDest(NewSucc); |
3852 | } |
3853 | |
3854 | unsigned getNumSuccessors() const { return 2; } |
3855 | |
3856 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3857 | static bool classof(const Instruction *I) { |
3858 | return (I->getOpcode() == Instruction::Invoke); |
3859 | } |
3860 | static bool classof(const Value *V) { |
3861 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3862 | } |
3863 | |
3864 | private: |
3865 | |
3866 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
3867 | // method so that subclasses cannot accidentally use it. |
3868 | void setInstructionSubclassData(unsigned short D) { |
3869 | Instruction::setInstructionSubclassData(D); |
3870 | } |
3871 | }; |
3872 | |
3873 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3874 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3875 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3876 | const Twine &NameStr, Instruction *InsertBefore) |
3877 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3878 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3879 | InsertBefore) { |
3880 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3881 | } |
3882 | |
3883 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3884 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3885 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3886 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
3887 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3888 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3889 | InsertAtEnd) { |
3890 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3891 | } |
3892 | |
3893 | //===----------------------------------------------------------------------===// |
3894 | // CallBrInst Class |
3895 | //===----------------------------------------------------------------------===// |
3896 | |
3897 | /// CallBr instruction, tracking function calls that may not return control but |
3898 | /// instead transfer it to a third location. The SubclassData field is used to |
3899 | /// hold the calling convention of the call. |
3900 | /// |
3901 | class CallBrInst : public CallBase { |
3902 | |
3903 | unsigned NumIndirectDests; |
3904 | |
3905 | CallBrInst(const CallBrInst &BI); |
3906 | |
3907 | /// Construct a CallBrInst given a range of arguments. |
3908 | /// |
3909 | /// Construct a CallBrInst from a range of arguments |
3910 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3911 | ArrayRef<BasicBlock *> IndirectDests, |
3912 | ArrayRef<Value *> Args, |
3913 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3914 | const Twine &NameStr, Instruction *InsertBefore); |
3915 | |
3916 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3917 | ArrayRef<BasicBlock *> IndirectDests, |
3918 | ArrayRef<Value *> Args, |
3919 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3920 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3921 | |
3922 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, |
3923 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, |
3924 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3925 | |
3926 | /// Compute the number of operands to allocate. |
3927 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, |
3928 | int NumBundleInputs = 0) { |
3929 | // We need one operand for the called function, plus our extra operands and |
3930 | // the input operand counts provided. |
3931 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; |
3932 | } |
3933 | |
3934 | protected: |
3935 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3936 | friend class Instruction; |
3937 | |
3938 | CallBrInst *cloneImpl() const; |
3939 | |
3940 | public: |
3941 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3942 | BasicBlock *DefaultDest, |
3943 | ArrayRef<BasicBlock *> IndirectDests, |
3944 | ArrayRef<Value *> Args, const Twine &NameStr, |
3945 | Instruction *InsertBefore = nullptr) { |
3946 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3947 | return new (NumOperands) |
3948 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3949 | NumOperands, NameStr, InsertBefore); |
3950 | } |
3951 | |
3952 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3953 | BasicBlock *DefaultDest, |
3954 | ArrayRef<BasicBlock *> IndirectDests, |
3955 | ArrayRef<Value *> Args, |
3956 | ArrayRef<OperandBundleDef> Bundles = None, |
3957 | const Twine &NameStr = "", |
3958 | Instruction *InsertBefore = nullptr) { |
3959 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3960 | CountBundleInputs(Bundles)); |
3961 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3962 | |
3963 | return new (NumOperands, DescriptorBytes) |
3964 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3965 | NumOperands, NameStr, InsertBefore); |
3966 | } |
3967 | |
3968 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3969 | BasicBlock *DefaultDest, |
3970 | ArrayRef<BasicBlock *> IndirectDests, |
3971 | ArrayRef<Value *> Args, const Twine &NameStr, |
3972 | BasicBlock *InsertAtEnd) { |
3973 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3974 | return new (NumOperands) |
3975 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3976 | NumOperands, NameStr, InsertAtEnd); |
3977 | } |
3978 | |
3979 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3980 | BasicBlock *DefaultDest, |
3981 | ArrayRef<BasicBlock *> IndirectDests, |
3982 | ArrayRef<Value *> Args, |
3983 | ArrayRef<OperandBundleDef> Bundles, |
3984 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3985 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3986 | CountBundleInputs(Bundles)); |
3987 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3988 | |
3989 | return new (NumOperands, DescriptorBytes) |
3990 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3991 | NumOperands, NameStr, InsertAtEnd); |
3992 | } |
3993 | |
3994 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
3995 | ArrayRef<BasicBlock *> IndirectDests, |
3996 | ArrayRef<Value *> Args, const Twine &NameStr, |
3997 | Instruction *InsertBefore = nullptr) { |
3998 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
3999 | IndirectDests, Args, NameStr, InsertBefore); |
4000 | } |
4001 | |
4002 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4003 | ArrayRef<BasicBlock *> IndirectDests, |
4004 | ArrayRef<Value *> Args, |
4005 | ArrayRef<OperandBundleDef> Bundles = None, |
4006 | const Twine &NameStr = "", |
4007 | Instruction *InsertBefore = nullptr) { |
4008 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4009 | IndirectDests, Args, Bundles, NameStr, InsertBefore); |
4010 | } |
4011 | |
4012 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4013 | ArrayRef<BasicBlock *> IndirectDests, |
4014 | ArrayRef<Value *> Args, const Twine &NameStr, |
4015 | BasicBlock *InsertAtEnd) { |
4016 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4017 | IndirectDests, Args, NameStr, InsertAtEnd); |
4018 | } |
4019 | |
4020 | static CallBrInst *Create(FunctionCallee Func, |
4021 | BasicBlock *DefaultDest, |
4022 | ArrayRef<BasicBlock *> IndirectDests, |
4023 | ArrayRef<Value *> Args, |
4024 | ArrayRef<OperandBundleDef> Bundles, |
4025 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4026 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4027 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); |
4028 | } |
4029 | |
4030 | /// Create a clone of \p CBI with a different set of operand bundles and |
4031 | /// insert it before \p InsertPt. |
4032 | /// |
4033 | /// The returned callbr instruction is identical to \p CBI in every way |
4034 | /// except that the operand bundles for the new instruction are set to the |
4035 | /// operand bundles in \p Bundles. |
4036 | static CallBrInst *Create(CallBrInst *CBI, |
4037 | ArrayRef<OperandBundleDef> Bundles, |
4038 | Instruction *InsertPt = nullptr); |
4039 | |
4040 | /// Return the number of callbr indirect dest labels. |
4041 | /// |
4042 | unsigned getNumIndirectDests() const { return NumIndirectDests; } |
4043 | |
4044 | /// getIndirectDestLabel - Return the i-th indirect dest label. |
4045 | /// |
4046 | Value *getIndirectDestLabel(unsigned i) const { |
4047 | assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4047, __PRETTY_FUNCTION__)); |
4048 | return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4049 | 1); |
4050 | } |
4051 | |
4052 | Value *getIndirectDestLabelUse(unsigned i) const { |
4053 | assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4053, __PRETTY_FUNCTION__)); |
4054 | return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4055 | 1); |
4056 | } |
4057 | |
4058 | // Return the destination basic blocks... |
4059 | BasicBlock *getDefaultDest() const { |
4060 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); |
4061 | } |
4062 | BasicBlock *getIndirectDest(unsigned i) const { |
4063 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); |
4064 | } |
4065 | SmallVector<BasicBlock *, 16> getIndirectDests() const { |
4066 | SmallVector<BasicBlock *, 16> IndirectDests; |
4067 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) |
4068 | IndirectDests.push_back(getIndirectDest(i)); |
4069 | return IndirectDests; |
4070 | } |
4071 | void setDefaultDest(BasicBlock *B) { |
4072 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); |
4073 | } |
4074 | void setIndirectDest(unsigned i, BasicBlock *B) { |
4075 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); |
4076 | } |
4077 | |
4078 | BasicBlock *getSuccessor(unsigned i) const { |
4079 | assert(i < getNumSuccessors() + 1 &&((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4080, __PRETTY_FUNCTION__)) |
4080 | "Successor # out of range for callbr!")((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4080, __PRETTY_FUNCTION__)); |
4081 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); |
4082 | } |
4083 | |
4084 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
4085 | assert(idx < getNumIndirectDests() + 1 &&((idx < getNumIndirectDests() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4086, __PRETTY_FUNCTION__)) |
4086 | "Successor # out of range for callbr!")((idx < getNumIndirectDests() + 1 && "Successor # out of range for callbr!" ) ? static_cast<void> (0) : __assert_fail ("idx < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4086, __PRETTY_FUNCTION__)); |
4087 | *(&Op<-1>() - getNumIndirectDests() -1 + idx) = |
4088 | reinterpret_cast<Value *>(NewSucc); |
4089 | } |
4090 | |
4091 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } |
4092 | |
4093 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4094 | static bool classof(const Instruction *I) { |
4095 | return (I->getOpcode() == Instruction::CallBr); |
4096 | } |
4097 | static bool classof(const Value *V) { |
4098 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4099 | } |
4100 | |
4101 | private: |
4102 | |
4103 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4104 | // method so that subclasses cannot accidentally use it. |
4105 | void setInstructionSubclassData(unsigned short D) { |
4106 | Instruction::setInstructionSubclassData(D); |
4107 | } |
4108 | }; |
4109 | |
4110 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4111 | ArrayRef<BasicBlock *> IndirectDests, |
4112 | ArrayRef<Value *> Args, |
4113 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4114 | const Twine &NameStr, Instruction *InsertBefore) |
4115 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4116 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4117 | InsertBefore) { |
4118 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4119 | } |
4120 | |
4121 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4122 | ArrayRef<BasicBlock *> IndirectDests, |
4123 | ArrayRef<Value *> Args, |
4124 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4125 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
4126 | : CallBase( |
4127 | cast<FunctionType>( |
4128 | cast<PointerType>(Func->getType())->getElementType()) |
4129 | ->getReturnType(), |
4130 | Instruction::CallBr, |
4131 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4132 | InsertAtEnd) { |
4133 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4134 | } |
4135 | |
4136 | //===----------------------------------------------------------------------===// |
4137 | // ResumeInst Class |
4138 | //===----------------------------------------------------------------------===// |
4139 | |
4140 | //===--------------------------------------------------------------------------- |
4141 | /// Resume the propagation of an exception. |
4142 | /// |
4143 | class ResumeInst : public Instruction { |
4144 | ResumeInst(const ResumeInst &RI); |
4145 | |
4146 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); |
4147 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); |
4148 | |
4149 | protected: |
4150 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4151 | friend class Instruction; |
4152 | |
4153 | ResumeInst *cloneImpl() const; |
4154 | |
4155 | public: |
4156 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { |
4157 | return new(1) ResumeInst(Exn, InsertBefore); |
4158 | } |
4159 | |
4160 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { |
4161 | return new(1) ResumeInst(Exn, InsertAtEnd); |
4162 | } |
4163 | |
4164 | /// Provide fast operand accessors |
4165 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4166 | |
4167 | /// Convenience accessor. |
4168 | Value *getValue() const { return Op<0>(); } |
4169 | |
4170 | unsigned getNumSuccessors() const { return 0; } |
4171 | |
4172 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4173 | static bool classof(const Instruction *I) { |
4174 | return I->getOpcode() == Instruction::Resume; |
4175 | } |
4176 | static bool classof(const Value *V) { |
4177 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4178 | } |
4179 | |
4180 | private: |
4181 | BasicBlock *getSuccessor(unsigned idx) const { |
4182 | llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4182); |
4183 | } |
4184 | |
4185 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
4186 | llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4186); |
4187 | } |
4188 | }; |
4189 | |
4190 | template <> |
4191 | struct OperandTraits<ResumeInst> : |
4192 | public FixedNumOperandTraits<ResumeInst, 1> { |
4193 | }; |
4194 | |
4195 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits <ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator ResumeInst::op_begin() const { return OperandTraits<ResumeInst >::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst ::op_iterator ResumeInst::op_end() { return OperandTraits< ResumeInst>::op_end(this); } ResumeInst::const_op_iterator ResumeInst::op_end() const { return OperandTraits<ResumeInst >::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<ResumeInst>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4195, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<ResumeInst>::op_begin(const_cast<ResumeInst *>(this))[i_nocapture].get()); } void ResumeInst::setOperand (unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture < OperandTraits<ResumeInst>::operands(this) && "setOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4195, __PRETTY_FUNCTION__)); OperandTraits<ResumeInst> ::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ResumeInst ::getNumOperands() const { return OperandTraits<ResumeInst >::operands(this); } template <int Idx_nocapture> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture >(this); } template <int Idx_nocapture> const Use & ResumeInst::Op() const { return this->OpFrom<Idx_nocapture >(this); } |
4196 | |
4197 | //===----------------------------------------------------------------------===// |
4198 | // CatchSwitchInst Class |
4199 | //===----------------------------------------------------------------------===// |
4200 | class CatchSwitchInst : public Instruction { |
4201 | /// The number of operands actually allocated. NumOperands is |
4202 | /// the number actually in use. |
4203 | unsigned ReservedSpace; |
4204 | |
4205 | // Operand[0] = Outer scope |
4206 | // Operand[1] = Unwind block destination |
4207 | // Operand[n] = BasicBlock to go to on match |
4208 | CatchSwitchInst(const CatchSwitchInst &CSI); |
4209 | |
4210 | /// Create a new switch instruction, specifying a |
4211 | /// default destination. The number of additional handlers can be specified |
4212 | /// here to make memory allocation more efficient. |
4213 | /// This constructor can also autoinsert before another instruction. |
4214 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4215 | unsigned NumHandlers, const Twine &NameStr, |
4216 | Instruction *InsertBefore); |
4217 | |
4218 | /// Create a new switch instruction, specifying a |
4219 | /// default destination. The number of additional handlers can be specified |
4220 | /// here to make memory allocation more efficient. |
4221 | /// This constructor also autoinserts at the end of the specified BasicBlock. |
4222 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4223 | unsigned NumHandlers, const Twine &NameStr, |
4224 | BasicBlock *InsertAtEnd); |
4225 | |
4226 | // allocate space for exactly zero operands |
4227 | void *operator new(size_t s) { return User::operator new(s); } |
4228 | |
4229 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); |
4230 | void growOperands(unsigned Size); |
4231 | |
4232 | protected: |
4233 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4234 | friend class Instruction; |
4235 | |
4236 | CatchSwitchInst *cloneImpl() const; |
4237 | |
4238 | public: |
4239 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4240 | unsigned NumHandlers, |
4241 | const Twine &NameStr = "", |
4242 | Instruction *InsertBefore = nullptr) { |
4243 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4244 | InsertBefore); |
4245 | } |
4246 | |
4247 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4248 | unsigned NumHandlers, const Twine &NameStr, |
4249 | BasicBlock *InsertAtEnd) { |
4250 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4251 | InsertAtEnd); |
4252 | } |
4253 | |
4254 | /// Provide fast operand accessors |
4255 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4256 | |
4257 | // Accessor Methods for CatchSwitch stmt |
4258 | Value *getParentPad() const { return getOperand(0); } |
4259 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } |
4260 | |
4261 | // Accessor Methods for CatchSwitch stmt |
4262 | bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; } |
4263 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4264 | BasicBlock *getUnwindDest() const { |
4265 | if (hasUnwindDest()) |
4266 | return cast<BasicBlock>(getOperand(1)); |
4267 | return nullptr; |
4268 | } |
4269 | void setUnwindDest(BasicBlock *UnwindDest) { |
4270 | assert(UnwindDest)((UnwindDest) ? static_cast<void> (0) : __assert_fail ( "UnwindDest", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4270, __PRETTY_FUNCTION__)); |
4271 | assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail ("hasUnwindDest()", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4271, __PRETTY_FUNCTION__)); |
4272 | setOperand(1, UnwindDest); |
4273 | } |
4274 | |
4275 | /// return the number of 'handlers' in this catchswitch |
4276 | /// instruction, except the default handler |
4277 | unsigned getNumHandlers() const { |
4278 | if (hasUnwindDest()) |
4279 | return getNumOperands() - 2; |
4280 | return getNumOperands() - 1; |
4281 | } |
4282 | |
4283 | private: |
4284 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } |
4285 | static const BasicBlock *handler_helper(const Value *V) { |
4286 | return cast<BasicBlock>(V); |
4287 | } |
4288 | |
4289 | public: |
4290 | using DerefFnTy = BasicBlock *(*)(Value *); |
4291 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; |
4292 | using handler_range = iterator_range<handler_iterator>; |
4293 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); |
4294 | using const_handler_iterator = |
4295 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; |
4296 | using const_handler_range = iterator_range<const_handler_iterator>; |
4297 | |
4298 | /// Returns an iterator that points to the first handler in CatchSwitchInst. |
4299 | handler_iterator handler_begin() { |
4300 | op_iterator It = op_begin() + 1; |
4301 | if (hasUnwindDest()) |
4302 | ++It; |
4303 | return handler_iterator(It, DerefFnTy(handler_helper)); |
4304 | } |
4305 | |
4306 | /// Returns an iterator that points to the first handler in the |
4307 | /// CatchSwitchInst. |
4308 | const_handler_iterator handler_begin() const { |
4309 | const_op_iterator It = op_begin() + 1; |
4310 | if (hasUnwindDest()) |
4311 | ++It; |
4312 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); |
4313 | } |
4314 | |
4315 | /// Returns a read-only iterator that points one past the last |
4316 | /// handler in the CatchSwitchInst. |
4317 | handler_iterator handler_end() { |
4318 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); |
4319 | } |
4320 | |
4321 | /// Returns an iterator that points one past the last handler in the |
4322 | /// CatchSwitchInst. |
4323 | const_handler_iterator handler_end() const { |
4324 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); |
4325 | } |
4326 | |
4327 | /// iteration adapter for range-for loops. |
4328 | handler_range handlers() { |
4329 | return make_range(handler_begin(), handler_end()); |
4330 | } |
4331 | |
4332 | /// iteration adapter for range-for loops. |
4333 | const_handler_range handlers() const { |
4334 | return make_range(handler_begin(), handler_end()); |
4335 | } |
4336 | |
4337 | /// Add an entry to the switch instruction... |
4338 | /// Note: |
4339 | /// This action invalidates handler_end(). Old handler_end() iterator will |
4340 | /// point to the added handler. |
4341 | void addHandler(BasicBlock *Dest); |
4342 | |
4343 | void removeHandler(handler_iterator HI); |
4344 | |
4345 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } |
4346 | BasicBlock *getSuccessor(unsigned Idx) const { |
4347 | assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4348, __PRETTY_FUNCTION__)) |
4348 | "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4348, __PRETTY_FUNCTION__)); |
4349 | return cast<BasicBlock>(getOperand(Idx + 1)); |
4350 | } |
4351 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { |
4352 | assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4353, __PRETTY_FUNCTION__)) |
4353 | "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4353, __PRETTY_FUNCTION__)); |
4354 | setOperand(Idx + 1, NewSucc); |
4355 | } |
4356 | |
4357 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4358 | static bool classof(const Instruction *I) { |
4359 | return I->getOpcode() == Instruction::CatchSwitch; |
4360 | } |
4361 | static bool classof(const Value *V) { |
4362 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4363 | } |
4364 | }; |
4365 | |
4366 | template <> |
4367 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; |
4368 | |
4369 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst ::const_op_iterator CatchSwitchInst::op_begin() const { return OperandTraits<CatchSwitchInst>::op_begin(const_cast< CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst ::op_end() { return OperandTraits<CatchSwitchInst>::op_end (this); } CatchSwitchInst::const_op_iterator CatchSwitchInst:: op_end() const { return OperandTraits<CatchSwitchInst>:: op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4369, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<CatchSwitchInst>::op_begin(const_cast< CatchSwitchInst*>(this))[i_nocapture].get()); } void CatchSwitchInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<CatchSwitchInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4369, __PRETTY_FUNCTION__)); OperandTraits<CatchSwitchInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CatchSwitchInst::getNumOperands() const { return OperandTraits <CatchSwitchInst>::operands(this); } template <int Idx_nocapture > Use &CatchSwitchInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchSwitchInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
4370 | |
4371 | //===----------------------------------------------------------------------===// |
4372 | // CleanupPadInst Class |
4373 | //===----------------------------------------------------------------------===// |
4374 | class CleanupPadInst : public FuncletPadInst { |
4375 | private: |
4376 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4377 | unsigned Values, const Twine &NameStr, |
4378 | Instruction *InsertBefore) |
4379 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4380 | NameStr, InsertBefore) {} |
4381 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4382 | unsigned Values, const Twine &NameStr, |
4383 | BasicBlock *InsertAtEnd) |
4384 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4385 | NameStr, InsertAtEnd) {} |
4386 | |
4387 | public: |
4388 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, |
4389 | const Twine &NameStr = "", |
4390 | Instruction *InsertBefore = nullptr) { |
4391 | unsigned Values = 1 + Args.size(); |
4392 | return new (Values) |
4393 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); |
4394 | } |
4395 | |
4396 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, |
4397 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4398 | unsigned Values = 1 + Args.size(); |
4399 | return new (Values) |
4400 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); |
4401 | } |
4402 | |
4403 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4404 | static bool classof(const Instruction *I) { |
4405 | return I->getOpcode() == Instruction::CleanupPad; |
4406 | } |
4407 | static bool classof(const Value *V) { |
4408 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4409 | } |
4410 | }; |
4411 | |
4412 | //===----------------------------------------------------------------------===// |
4413 | // CatchPadInst Class |
4414 | //===----------------------------------------------------------------------===// |
4415 | class CatchPadInst : public FuncletPadInst { |
4416 | private: |
4417 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4418 | unsigned Values, const Twine &NameStr, |
4419 | Instruction *InsertBefore) |
4420 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4421 | NameStr, InsertBefore) {} |
4422 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4423 | unsigned Values, const Twine &NameStr, |
4424 | BasicBlock *InsertAtEnd) |
4425 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4426 | NameStr, InsertAtEnd) {} |
4427 | |
4428 | public: |
4429 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4430 | const Twine &NameStr = "", |
4431 | Instruction *InsertBefore = nullptr) { |
4432 | unsigned Values = 1 + Args.size(); |
4433 | return new (Values) |
4434 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); |
4435 | } |
4436 | |
4437 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4438 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4439 | unsigned Values = 1 + Args.size(); |
4440 | return new (Values) |
4441 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); |
4442 | } |
4443 | |
4444 | /// Convenience accessors |
4445 | CatchSwitchInst *getCatchSwitch() const { |
4446 | return cast<CatchSwitchInst>(Op<-1>()); |
4447 | } |
4448 | void setCatchSwitch(Value *CatchSwitch) { |
4449 | assert(CatchSwitch)((CatchSwitch) ? static_cast<void> (0) : __assert_fail ( "CatchSwitch", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4449, __PRETTY_FUNCTION__)); |
4450 | Op<-1>() = CatchSwitch; |
4451 | } |
4452 | |
4453 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4454 | static bool classof(const Instruction *I) { |
4455 | return I->getOpcode() == Instruction::CatchPad; |
4456 | } |
4457 | static bool classof(const Value *V) { |
4458 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4459 | } |
4460 | }; |
4461 | |
4462 | //===----------------------------------------------------------------------===// |
4463 | // CatchReturnInst Class |
4464 | //===----------------------------------------------------------------------===// |
4465 | |
4466 | class CatchReturnInst : public Instruction { |
4467 | CatchReturnInst(const CatchReturnInst &RI); |
4468 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); |
4469 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); |
4470 | |
4471 | void init(Value *CatchPad, BasicBlock *BB); |
4472 | |
4473 | protected: |
4474 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4475 | friend class Instruction; |
4476 | |
4477 | CatchReturnInst *cloneImpl() const; |
4478 | |
4479 | public: |
4480 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4481 | Instruction *InsertBefore = nullptr) { |
4482 | assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4482, __PRETTY_FUNCTION__)); |
4483 | assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4483, __PRETTY_FUNCTION__)); |
4484 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); |
4485 | } |
4486 | |
4487 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4488 | BasicBlock *InsertAtEnd) { |
4489 | assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4489, __PRETTY_FUNCTION__)); |
4490 | assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4490, __PRETTY_FUNCTION__)); |
4491 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); |
4492 | } |
4493 | |
4494 | /// Provide fast operand accessors |
4495 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4496 | |
4497 | /// Convenience accessors. |
4498 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } |
4499 | void setCatchPad(CatchPadInst *CatchPad) { |
4500 | assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4500, __PRETTY_FUNCTION__)); |
4501 | Op<0>() = CatchPad; |
4502 | } |
4503 | |
4504 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } |
4505 | void setSuccessor(BasicBlock *NewSucc) { |
4506 | assert(NewSucc)((NewSucc) ? static_cast<void> (0) : __assert_fail ("NewSucc" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4506, __PRETTY_FUNCTION__)); |
4507 | Op<1>() = NewSucc; |
4508 | } |
4509 | unsigned getNumSuccessors() const { return 1; } |
4510 | |
4511 | /// Get the parentPad of this catchret's catchpad's catchswitch. |
4512 | /// The successor block is implicitly a member of this funclet. |
4513 | Value *getCatchSwitchParentPad() const { |
4514 | return getCatchPad()->getCatchSwitch()->getParentPad(); |
4515 | } |
4516 | |
4517 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4518 | static bool classof(const Instruction *I) { |
4519 | return (I->getOpcode() == Instruction::CatchRet); |
4520 | } |
4521 | static bool classof(const Value *V) { |
4522 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4523 | } |
4524 | |
4525 | private: |
4526 | BasicBlock *getSuccessor(unsigned Idx) const { |
4527 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4527, __PRETTY_FUNCTION__)); |
4528 | return getSuccessor(); |
4529 | } |
4530 | |
4531 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4532 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!" ) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4532, __PRETTY_FUNCTION__)); |
4533 | setSuccessor(B); |
4534 | } |
4535 | }; |
4536 | |
4537 | template <> |
4538 | struct OperandTraits<CatchReturnInst> |
4539 | : public FixedNumOperandTraits<CatchReturnInst, 2> {}; |
4540 | |
4541 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst ::const_op_iterator CatchReturnInst::op_begin() const { return OperandTraits<CatchReturnInst>::op_begin(const_cast< CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst ::op_end() { return OperandTraits<CatchReturnInst>::op_end (this); } CatchReturnInst::const_op_iterator CatchReturnInst:: op_end() const { return OperandTraits<CatchReturnInst>:: op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4541, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<CatchReturnInst>::op_begin(const_cast< CatchReturnInst*>(this))[i_nocapture].get()); } void CatchReturnInst ::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (( i_nocapture < OperandTraits<CatchReturnInst>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4541, __PRETTY_FUNCTION__)); OperandTraits<CatchReturnInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CatchReturnInst::getNumOperands() const { return OperandTraits <CatchReturnInst>::operands(this); } template <int Idx_nocapture > Use &CatchReturnInst::Op() { return this->OpFrom< Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchReturnInst::Op() const { return this-> OpFrom<Idx_nocapture>(this); } |
4542 | |
4543 | //===----------------------------------------------------------------------===// |
4544 | // CleanupReturnInst Class |
4545 | //===----------------------------------------------------------------------===// |
4546 | |
4547 | class CleanupReturnInst : public Instruction { |
4548 | private: |
4549 | CleanupReturnInst(const CleanupReturnInst &RI); |
4550 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4551 | Instruction *InsertBefore = nullptr); |
4552 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4553 | BasicBlock *InsertAtEnd); |
4554 | |
4555 | void init(Value *CleanupPad, BasicBlock *UnwindBB); |
4556 | |
4557 | protected: |
4558 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4559 | friend class Instruction; |
4560 | |
4561 | CleanupReturnInst *cloneImpl() const; |
4562 | |
4563 | public: |
4564 | static CleanupReturnInst *Create(Value *CleanupPad, |
4565 | BasicBlock *UnwindBB = nullptr, |
4566 | Instruction *InsertBefore = nullptr) { |
4567 | assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail ( "CleanupPad", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4567, __PRETTY_FUNCTION__)); |
4568 | unsigned Values = 1; |
4569 | if (UnwindBB) |
4570 | ++Values; |
4571 | return new (Values) |
4572 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); |
4573 | } |
4574 | |
4575 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, |
4576 | BasicBlock *InsertAtEnd) { |
4577 | assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail ( "CleanupPad", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4577, __PRETTY_FUNCTION__)); |
4578 | unsigned Values = 1; |
4579 | if (UnwindBB) |
4580 | ++Values; |
4581 | return new (Values) |
4582 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); |
4583 | } |
4584 | |
4585 | /// Provide fast operand accessors |
4586 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
4587 | |
4588 | bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; } |
4589 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4590 | |
4591 | /// Convenience accessor. |
4592 | CleanupPadInst *getCleanupPad() const { |
4593 | return cast<CleanupPadInst>(Op<0>()); |
4594 | } |
4595 | void setCleanupPad(CleanupPadInst *CleanupPad) { |
4596 | assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail ( "CleanupPad", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4596, __PRETTY_FUNCTION__)); |
4597 | Op<0>() = CleanupPad; |
4598 | } |
4599 | |
4600 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } |
4601 | |
4602 | BasicBlock *getUnwindDest() const { |
4603 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; |
4604 | } |
4605 | void setUnwindDest(BasicBlock *NewDest) { |
4606 | assert(NewDest)((NewDest) ? static_cast<void> (0) : __assert_fail ("NewDest" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4606, __PRETTY_FUNCTION__)); |
4607 | assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail ("hasUnwindDest()", "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4607, __PRETTY_FUNCTION__)); |
4608 | Op<1>() = NewDest; |
4609 | } |
4610 | |
4611 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4612 | static bool classof(const Instruction *I) { |
4613 | return (I->getOpcode() == Instruction::CleanupRet); |
4614 | } |
4615 | static bool classof(const Value *V) { |
4616 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4617 | } |
4618 | |
4619 | private: |
4620 | BasicBlock *getSuccessor(unsigned Idx) const { |
4621 | assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4621, __PRETTY_FUNCTION__)); |
4622 | return getUnwindDest(); |
4623 | } |
4624 | |
4625 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4626 | assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4626, __PRETTY_FUNCTION__)); |
4627 | setUnwindDest(B); |
4628 | } |
4629 | |
4630 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4631 | // method so that subclasses cannot accidentally use it. |
4632 | void setInstructionSubclassData(unsigned short D) { |
4633 | Instruction::setInstructionSubclassData(D); |
4634 | } |
4635 | }; |
4636 | |
4637 | template <> |
4638 | struct OperandTraits<CleanupReturnInst> |
4639 | : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; |
4640 | |
4641 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() { return OperandTraits<CleanupReturnInst>::op_begin(this ); } CleanupReturnInst::const_op_iterator CleanupReturnInst:: op_begin() const { return OperandTraits<CleanupReturnInst> ::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst ::op_iterator CleanupReturnInst::op_end() { return OperandTraits <CleanupReturnInst>::op_end(this); } CleanupReturnInst:: const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits <CleanupReturnInst>::op_end(const_cast<CleanupReturnInst *>(this)); } Value *CleanupReturnInst::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<CleanupReturnInst >::operands(this) && "getOperand() out of range!") ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4641, __PRETTY_FUNCTION__)); return cast_or_null<Value> ( OperandTraits<CleanupReturnInst>::op_begin(const_cast <CleanupReturnInst*>(this))[i_nocapture].get()); } void CleanupReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture ) { ((i_nocapture < OperandTraits<CleanupReturnInst> ::operands(this) && "setOperand() out of range!") ? 
static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4641, __PRETTY_FUNCTION__)); OperandTraits<CleanupReturnInst >::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CleanupReturnInst::getNumOperands() const { return OperandTraits <CleanupReturnInst>::operands(this); } template <int Idx_nocapture> Use &CleanupReturnInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &CleanupReturnInst::Op() const { return this ->OpFrom<Idx_nocapture>(this); } |
4642 | |
4643 | //===----------------------------------------------------------------------===// |
4644 | // UnreachableInst Class |
4645 | //===----------------------------------------------------------------------===// |
4646 | |
4647 | //===--------------------------------------------------------------------------- |
4648 | /// This function has undefined behavior. In particular, the |
4649 | /// presence of this instruction indicates some higher level knowledge that the |
4650 | /// end of the block cannot be reached. |
4651 | /// |
4652 | class UnreachableInst : public Instruction { |
4653 | protected: |
4654 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4655 | friend class Instruction; |
4656 | |
4657 | UnreachableInst *cloneImpl() const; |
4658 | |
4659 | public: |
4660 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); |
4661 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
4662 | |
4663 | // allocate space for exactly zero operands |
4664 | void *operator new(size_t s) { |
4665 | return User::operator new(s, 0); |
4666 | } |
4667 | |
4668 | unsigned getNumSuccessors() const { return 0; } |
4669 | |
4670 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4671 | static bool classof(const Instruction *I) { |
4672 | return I->getOpcode() == Instruction::Unreachable; |
4673 | } |
4674 | static bool classof(const Value *V) { |
4675 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4676 | } |
4677 | |
4678 | private: |
4679 | BasicBlock *getSuccessor(unsigned idx) const { |
4680 | llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4680); |
4681 | } |
4682 | |
4683 | void setSuccessor(unsigned idx, BasicBlock *B) { |
4684 | llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 4684); |
4685 | } |
4686 | }; |
4687 | |
4688 | //===----------------------------------------------------------------------===// |
4689 | // TruncInst Class |
4690 | //===----------------------------------------------------------------------===// |
4691 | |
4692 | /// This class represents a truncation of integer types. |
4693 | class TruncInst : public CastInst { |
4694 | protected: |
4695 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4696 | friend class Instruction; |
4697 | |
4698 | /// Clone an identical TruncInst |
4699 | TruncInst *cloneImpl() const; |
4700 | |
4701 | public: |
4702 | /// Constructor with insert-before-instruction semantics |
4703 | TruncInst( |
4704 | Value *S, ///< The value to be truncated |
4705 | Type *Ty, ///< The (smaller) type to truncate to |
4706 | const Twine &NameStr = "", ///< A name for the new instruction |
4707 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4708 | ); |
4709 | |
4710 | /// Constructor with insert-at-end-of-block semantics |
4711 | TruncInst( |
4712 | Value *S, ///< The value to be truncated |
4713 | Type *Ty, ///< The (smaller) type to truncate to |
4714 | const Twine &NameStr, ///< A name for the new instruction |
4715 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4716 | ); |
4717 | |
4718 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4719 | static bool classof(const Instruction *I) { |
4720 | return I->getOpcode() == Trunc; |
4721 | } |
4722 | static bool classof(const Value *V) { |
4723 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4724 | } |
4725 | }; |
4726 | |
4727 | //===----------------------------------------------------------------------===// |
4728 | // ZExtInst Class |
4729 | //===----------------------------------------------------------------------===// |
4730 | |
4731 | /// This class represents zero extension of integer types. |
4732 | class ZExtInst : public CastInst { |
4733 | protected: |
4734 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4735 | friend class Instruction; |
4736 | |
4737 | /// Clone an identical ZExtInst |
4738 | ZExtInst *cloneImpl() const; |
4739 | |
4740 | public: |
4741 | /// Constructor with insert-before-instruction semantics |
4742 | ZExtInst( |
4743 | Value *S, ///< The value to be zero extended |
4744 | Type *Ty, ///< The type to zero extend to |
4745 | const Twine &NameStr = "", ///< A name for the new instruction |
4746 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4747 | ); |
4748 | |
4749 | /// Constructor with insert-at-end semantics. |
4750 | ZExtInst( |
4751 | Value *S, ///< The value to be zero extended |
4752 | Type *Ty, ///< The type to zero extend to |
4753 | const Twine &NameStr, ///< A name for the new instruction |
4754 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4755 | ); |
4756 | |
4757 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4758 | static bool classof(const Instruction *I) { |
4759 | return I->getOpcode() == ZExt; |
4760 | } |
4761 | static bool classof(const Value *V) { |
4762 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4763 | } |
4764 | }; |
4765 | |
4766 | //===----------------------------------------------------------------------===// |
4767 | // SExtInst Class |
4768 | //===----------------------------------------------------------------------===// |
4769 | |
4770 | /// This class represents a sign extension of integer types. |
4771 | class SExtInst : public CastInst { |
4772 | protected: |
4773 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4774 | friend class Instruction; |
4775 | |
4776 | /// Clone an identical SExtInst |
4777 | SExtInst *cloneImpl() const; |
4778 | |
4779 | public: |
4780 | /// Constructor with insert-before-instruction semantics |
4781 | SExtInst( |
4782 | Value *S, ///< The value to be sign extended |
4783 | Type *Ty, ///< The type to sign extend to |
4784 | const Twine &NameStr = "", ///< A name for the new instruction |
4785 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4786 | ); |
4787 | |
4788 | /// Constructor with insert-at-end-of-block semantics |
4789 | SExtInst( |
4790 | Value *S, ///< The value to be sign extended |
4791 | Type *Ty, ///< The type to sign extend to |
4792 | const Twine &NameStr, ///< A name for the new instruction |
4793 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4794 | ); |
4795 | |
4796 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4797 | static bool classof(const Instruction *I) { |
4798 | return I->getOpcode() == SExt; |
4799 | } |
4800 | static bool classof(const Value *V) { |
4801 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4802 | } |
4803 | }; |
4804 | |
4805 | //===----------------------------------------------------------------------===// |
4806 | // FPTruncInst Class |
4807 | //===----------------------------------------------------------------------===// |
4808 | |
4809 | /// This class represents a truncation of floating point types. |
4810 | class FPTruncInst : public CastInst { |
4811 | protected: |
4812 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4813 | friend class Instruction; |
4814 | |
4815 | /// Clone an identical FPTruncInst |
4816 | FPTruncInst *cloneImpl() const; |
4817 | |
4818 | public: |
4819 | /// Constructor with insert-before-instruction semantics |
4820 | FPTruncInst( |
4821 | Value *S, ///< The value to be truncated |
4822 | Type *Ty, ///< The type to truncate to |
4823 | const Twine &NameStr = "", ///< A name for the new instruction |
4824 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4825 | ); |
4826 | |
4827 | /// Constructor with insert-before-instruction semantics |
4828 | FPTruncInst( |
4829 | Value *S, ///< The value to be truncated |
4830 | Type *Ty, ///< The type to truncate to |
4831 | const Twine &NameStr, ///< A name for the new instruction |
4832 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4833 | ); |
4834 | |
4835 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4836 | static bool classof(const Instruction *I) { |
4837 | return I->getOpcode() == FPTrunc; |
4838 | } |
4839 | static bool classof(const Value *V) { |
4840 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4841 | } |
4842 | }; |
4843 | |
4844 | //===----------------------------------------------------------------------===// |
4845 | // FPExtInst Class |
4846 | //===----------------------------------------------------------------------===// |
4847 | |
4848 | /// This class represents an extension of floating point types. |
4849 | class FPExtInst : public CastInst { |
4850 | protected: |
4851 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4852 | friend class Instruction; |
4853 | |
4854 | /// Clone an identical FPExtInst |
4855 | FPExtInst *cloneImpl() const; |
4856 | |
4857 | public: |
4858 | /// Constructor with insert-before-instruction semantics |
4859 | FPExtInst( |
4860 | Value *S, ///< The value to be extended |
4861 | Type *Ty, ///< The type to extend to |
4862 | const Twine &NameStr = "", ///< A name for the new instruction |
4863 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4864 | ); |
4865 | |
4866 | /// Constructor with insert-at-end-of-block semantics |
4867 | FPExtInst( |
4868 | Value *S, ///< The value to be extended |
4869 | Type *Ty, ///< The type to extend to |
4870 | const Twine &NameStr, ///< A name for the new instruction |
4871 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4872 | ); |
4873 | |
4874 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4875 | static bool classof(const Instruction *I) { |
4876 | return I->getOpcode() == FPExt; |
4877 | } |
4878 | static bool classof(const Value *V) { |
4879 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4880 | } |
4881 | }; |
4882 | |
4883 | //===----------------------------------------------------------------------===// |
4884 | // UIToFPInst Class |
4885 | //===----------------------------------------------------------------------===// |
4886 | |
4887 | /// This class represents a cast unsigned integer to floating point. |
4888 | class UIToFPInst : public CastInst { |
4889 | protected: |
4890 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4891 | friend class Instruction; |
4892 | |
4893 | /// Clone an identical UIToFPInst |
4894 | UIToFPInst *cloneImpl() const; |
4895 | |
4896 | public: |
4897 | /// Constructor with insert-before-instruction semantics |
4898 | UIToFPInst( |
4899 | Value *S, ///< The value to be converted |
4900 | Type *Ty, ///< The type to convert to |
4901 | const Twine &NameStr = "", ///< A name for the new instruction |
4902 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4903 | ); |
4904 | |
4905 | /// Constructor with insert-at-end-of-block semantics |
4906 | UIToFPInst( |
4907 | Value *S, ///< The value to be converted |
4908 | Type *Ty, ///< The type to convert to |
4909 | const Twine &NameStr, ///< A name for the new instruction |
4910 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4911 | ); |
4912 | |
4913 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4914 | static bool classof(const Instruction *I) { |
4915 | return I->getOpcode() == UIToFP; |
4916 | } |
4917 | static bool classof(const Value *V) { |
4918 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4919 | } |
4920 | }; |
4921 | |
4922 | //===----------------------------------------------------------------------===// |
4923 | // SIToFPInst Class |
4924 | //===----------------------------------------------------------------------===// |
4925 | |
4926 | /// This class represents a cast from signed integer to floating point. |
4927 | class SIToFPInst : public CastInst { |
4928 | protected: |
4929 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4930 | friend class Instruction; |
4931 | |
4932 | /// Clone an identical SIToFPInst |
4933 | SIToFPInst *cloneImpl() const; |
4934 | |
4935 | public: |
4936 | /// Constructor with insert-before-instruction semantics |
4937 | SIToFPInst( |
4938 | Value *S, ///< The value to be converted |
4939 | Type *Ty, ///< The type to convert to |
4940 | const Twine &NameStr = "", ///< A name for the new instruction |
4941 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4942 | ); |
4943 | |
4944 | /// Constructor with insert-at-end-of-block semantics |
4945 | SIToFPInst( |
4946 | Value *S, ///< The value to be converted |
4947 | Type *Ty, ///< The type to convert to |
4948 | const Twine &NameStr, ///< A name for the new instruction |
4949 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4950 | ); |
4951 | |
4952 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4953 | static bool classof(const Instruction *I) { |
4954 | return I->getOpcode() == SIToFP; |
4955 | } |
4956 | static bool classof(const Value *V) { |
4957 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4958 | } |
4959 | }; |
4960 | |
4961 | //===----------------------------------------------------------------------===// |
4962 | // FPToUIInst Class |
4963 | //===----------------------------------------------------------------------===// |
4964 | |
4965 | /// This class represents a cast from floating point to unsigned integer |
4966 | class FPToUIInst : public CastInst { |
4967 | protected: |
4968 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4969 | friend class Instruction; |
4970 | |
4971 | /// Clone an identical FPToUIInst |
4972 | FPToUIInst *cloneImpl() const; |
4973 | |
4974 | public: |
4975 | /// Constructor with insert-before-instruction semantics |
4976 | FPToUIInst( |
4977 | Value *S, ///< The value to be converted |
4978 | Type *Ty, ///< The type to convert to |
4979 | const Twine &NameStr = "", ///< A name for the new instruction |
4980 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4981 | ); |
4982 | |
4983 | /// Constructor with insert-at-end-of-block semantics |
4984 | FPToUIInst( |
4985 | Value *S, ///< The value to be converted |
4986 | Type *Ty, ///< The type to convert to |
4987 | const Twine &NameStr, ///< A name for the new instruction |
4988 | BasicBlock *InsertAtEnd ///< Where to insert the new instruction |
4989 | ); |
4990 | |
4991 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4992 | static bool classof(const Instruction *I) { |
4993 | return I->getOpcode() == FPToUI; |
4994 | } |
4995 | static bool classof(const Value *V) { |
4996 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4997 | } |
4998 | }; |
4999 | |
5000 | //===----------------------------------------------------------------------===// |
5001 | // FPToSIInst Class |
5002 | //===----------------------------------------------------------------------===// |
5003 | |
5004 | /// This class represents a cast from floating point to signed integer. |
5005 | class FPToSIInst : public CastInst { |
5006 | protected: |
5007 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5008 | friend class Instruction; |
5009 | |
5010 | /// Clone an identical FPToSIInst |
5011 | FPToSIInst *cloneImpl() const; |
5012 | |
5013 | public: |
5014 | /// Constructor with insert-before-instruction semantics |
5015 | FPToSIInst( |
5016 | Value *S, ///< The value to be converted |
5017 | Type *Ty, ///< The type to convert to |
5018 | const Twine &NameStr = "", ///< A name for the new instruction |
5019 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5020 | ); |
5021 | |
5022 | /// Constructor with insert-at-end-of-block semantics |
5023 | FPToSIInst( |
5024 | Value *S, ///< The value to be converted |
5025 | Type *Ty, ///< The type to convert to |
5026 | const Twine &NameStr, ///< A name for the new instruction |
5027 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5028 | ); |
5029 | |
5030 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5031 | static bool classof(const Instruction *I) { |
5032 | return I->getOpcode() == FPToSI; |
5033 | } |
5034 | static bool classof(const Value *V) { |
5035 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5036 | } |
5037 | }; |
5038 | |
5039 | //===----------------------------------------------------------------------===// |
5040 | // IntToPtrInst Class |
5041 | //===----------------------------------------------------------------------===// |
5042 | |
5043 | /// This class represents a cast from an integer to a pointer. |
5044 | class IntToPtrInst : public CastInst { |
5045 | public: |
5046 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5047 | friend class Instruction; |
5048 | |
5049 | /// Constructor with insert-before-instruction semantics |
5050 | IntToPtrInst( |
5051 | Value *S, ///< The value to be converted |
5052 | Type *Ty, ///< The type to convert to |
5053 | const Twine &NameStr = "", ///< A name for the new instruction |
5054 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5055 | ); |
5056 | |
5057 | /// Constructor with insert-at-end-of-block semantics |
5058 | IntToPtrInst( |
5059 | Value *S, ///< The value to be converted |
5060 | Type *Ty, ///< The type to convert to |
5061 | const Twine &NameStr, ///< A name for the new instruction |
5062 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5063 | ); |
5064 | |
5065 | /// Clone an identical IntToPtrInst. |
5066 | IntToPtrInst *cloneImpl() const; |
5067 | |
5068 | /// Returns the address space of this instruction's pointer type. |
5069 | unsigned getAddressSpace() const { |
5070 | return getType()->getPointerAddressSpace(); |
5071 | } |
5072 | |
5073 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5074 | static bool classof(const Instruction *I) { |
5075 | return I->getOpcode() == IntToPtr; |
5076 | } |
5077 | static bool classof(const Value *V) { |
5078 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5079 | } |
5080 | }; |
5081 | |
5082 | //===----------------------------------------------------------------------===// |
5083 | // PtrToIntInst Class |
5084 | //===----------------------------------------------------------------------===// |
5085 | |
5086 | /// This class represents a cast from a pointer to an integer. |
5087 | class PtrToIntInst : public CastInst { |
5088 | protected: |
5089 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5090 | friend class Instruction; |
5091 | |
5092 | /// Clone an identical PtrToIntInst. |
5093 | PtrToIntInst *cloneImpl() const; |
5094 | |
5095 | public: |
5096 | /// Constructor with insert-before-instruction semantics |
5097 | PtrToIntInst( |
5098 | Value *S, ///< The value to be converted |
5099 | Type *Ty, ///< The type to convert to |
5100 | const Twine &NameStr = "", ///< A name for the new instruction |
5101 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5102 | ); |
5103 | |
5104 | /// Constructor with insert-at-end-of-block semantics |
5105 | PtrToIntInst( |
5106 | Value *S, ///< The value to be converted |
5107 | Type *Ty, ///< The type to convert to |
5108 | const Twine &NameStr, ///< A name for the new instruction |
5109 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5110 | ); |
5111 | |
5112 | /// Gets the pointer operand. |
5113 | Value *getPointerOperand() { return getOperand(0); } |
5114 | /// Gets the pointer operand. |
5115 | const Value *getPointerOperand() const { return getOperand(0); } |
5116 | /// Gets the operand index of the pointer operand. |
5117 | static unsigned getPointerOperandIndex() { return 0U; } |
5118 | |
5119 | /// Returns the address space of the pointer operand. |
5120 | unsigned getPointerAddressSpace() const { |
5121 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5122 | } |
5123 | |
5124 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5125 | static bool classof(const Instruction *I) { |
5126 | return I->getOpcode() == PtrToInt; |
5127 | } |
5128 | static bool classof(const Value *V) { |
5129 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5130 | } |
5131 | }; |
5132 | |
5133 | //===----------------------------------------------------------------------===// |
5134 | // BitCastInst Class |
5135 | //===----------------------------------------------------------------------===// |
5136 | |
5137 | /// This class represents a no-op cast from one type to another. |
5138 | class BitCastInst : public CastInst { |
5139 | protected: |
5140 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5141 | friend class Instruction; |
5142 | |
5143 | /// Clone an identical BitCastInst. |
5144 | BitCastInst *cloneImpl() const; |
5145 | |
5146 | public: |
5147 | /// Constructor with insert-before-instruction semantics |
5148 | BitCastInst( |
5149 | Value *S, ///< The value to be casted |
5150 | Type *Ty, ///< The type to casted to |
5151 | const Twine &NameStr = "", ///< A name for the new instruction |
5152 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5153 | ); |
5154 | |
5155 | /// Constructor with insert-at-end-of-block semantics |
5156 | BitCastInst( |
5157 | Value *S, ///< The value to be casted |
5158 | Type *Ty, ///< The type to casted to |
5159 | const Twine &NameStr, ///< A name for the new instruction |
5160 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5161 | ); |
5162 | |
5163 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5164 | static bool classof(const Instruction *I) { |
5165 | return I->getOpcode() == BitCast; |
5166 | } |
5167 | static bool classof(const Value *V) { |
5168 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5169 | } |
5170 | }; |
5171 | |
5172 | //===----------------------------------------------------------------------===// |
5173 | // AddrSpaceCastInst Class |
5174 | //===----------------------------------------------------------------------===// |
5175 | |
5176 | /// This class represents a conversion between pointers from one address space |
5177 | /// to another. |
5178 | class AddrSpaceCastInst : public CastInst { |
5179 | protected: |
5180 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5181 | friend class Instruction; |
5182 | |
5183 | /// Clone an identical AddrSpaceCastInst. |
5184 | AddrSpaceCastInst *cloneImpl() const; |
5185 | |
5186 | public: |
5187 | /// Constructor with insert-before-instruction semantics |
5188 | AddrSpaceCastInst( |
5189 | Value *S, ///< The value to be casted |
5190 | Type *Ty, ///< The type to casted to |
5191 | const Twine &NameStr = "", ///< A name for the new instruction |
5192 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5193 | ); |
5194 | |
5195 | /// Constructor with insert-at-end-of-block semantics |
5196 | AddrSpaceCastInst( |
5197 | Value *S, ///< The value to be casted |
5198 | Type *Ty, ///< The type to casted to |
5199 | const Twine &NameStr, ///< A name for the new instruction |
5200 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5201 | ); |
5202 | |
5203 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5204 | static bool classof(const Instruction *I) { |
5205 | return I->getOpcode() == AddrSpaceCast; |
5206 | } |
5207 | static bool classof(const Value *V) { |
5208 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5209 | } |
5210 | |
5211 | /// Gets the pointer operand. |
5212 | Value *getPointerOperand() { |
5213 | return getOperand(0); |
5214 | } |
5215 | |
5216 | /// Gets the pointer operand. |
5217 | const Value *getPointerOperand() const { |
5218 | return getOperand(0); |
5219 | } |
5220 | |
5221 | /// Gets the operand index of the pointer operand. |
5222 | static unsigned getPointerOperandIndex() { |
5223 | return 0U; |
5224 | } |
5225 | |
5226 | /// Returns the address space of the pointer operand. |
5227 | unsigned getSrcAddressSpace() const { |
5228 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5229 | } |
5230 | |
5231 | /// Returns the address space of the result. |
5232 | unsigned getDestAddressSpace() const { |
5233 | return getType()->getPointerAddressSpace(); |
5234 | } |
5235 | }; |
5236 | |
5237 | /// A helper function that returns the pointer operand of a load or store |
5238 | /// instruction. Returns nullptr if not load or store. |
5239 | inline Value *getLoadStorePointerOperand(Value *V) { |
5240 | if (auto *Load = dyn_cast<LoadInst>(V)) |
5241 | return Load->getPointerOperand(); |
5242 | if (auto *Store = dyn_cast<StoreInst>(V)) |
5243 | return Store->getPointerOperand(); |
5244 | return nullptr; |
5245 | } |
5246 | |
5247 | /// A helper function that returns the pointer operand of a load, store |
5248 | /// or GEP instruction. Returns nullptr if not load, store, or GEP. |
5249 | inline Value *getPointerOperand(Value *V) { |
5250 | if (auto *Ptr = getLoadStorePointerOperand(V)) |
5251 | return Ptr; |
5252 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) |
5253 | return Gep->getPointerOperand(); |
5254 | return nullptr; |
5255 | } |
5256 | |
5257 | /// A helper function that returns the alignment of load or store instruction. |
5258 | inline unsigned getLoadStoreAlignment(Value *I) { |
5259 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 5260, __PRETTY_FUNCTION__)) |
5260 | "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 5260, __PRETTY_FUNCTION__)); |
5261 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5262 | return LI->getAlignment(); |
5263 | return cast<StoreInst>(I)->getAlignment(); |
5264 | } |
5265 | |
5266 | /// A helper function that returns the address space of the pointer operand of |
5267 | /// load or store instruction. |
5268 | inline unsigned getLoadStoreAddressSpace(Value *I) { |
5269 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 5270, __PRETTY_FUNCTION__)) |
5270 | "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) && "Expected Load or Store instruction") ? static_cast<void> (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/IR/Instructions.h" , 5270, __PRETTY_FUNCTION__)); |
5271 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5272 | return LI->getPointerAddressSpace(); |
5273 | return cast<StoreInst>(I)->getPointerAddressSpace(); |
5274 | } |
5275 | |
5276 | } // end namespace llvm |
5277 | |
5278 | #endif // LLVM_IR_INSTRUCTIONS_H |
1 | //===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(), |
10 | // and dyn_cast_or_null<X>() templates. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_SUPPORT_CASTING_H |
15 | #define LLVM_SUPPORT_CASTING_H |
16 | |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/type_traits.h" |
19 | #include <cassert> |
20 | #include <memory> |
21 | #include <type_traits> |
22 | |
23 | namespace llvm { |
24 | |
25 | //===----------------------------------------------------------------------===// |
26 | // isa<x> Support Templates |
27 | //===----------------------------------------------------------------------===// |
28 | |
// Define a template that can be specialized by smart pointers to reflect the
// fact that they are automatically dereferenced, and are not involved with the
// template selection process... the default implementation is a noop.
//
template<typename From> struct simplify_type {
  using SimpleType = From; // The real type this represents...

  // An accessor to get the real value...
  static SimpleType &getSimplifiedValue(From &Val) { return Val; }
};
39 | |
// Specialization for const-qualified wrappers: simplify the underlying type,
// then re-apply constness (pushed past one level of pointer, if any).
template<typename From> struct simplify_type<const From> {
  using NonConstSimpleType = typename simplify_type<From>::SimpleType;
  using SimpleType =
      typename add_const_past_pointer<NonConstSimpleType>::type;
  using RetType =
      typename add_lvalue_reference_if_not_pointer<SimpleType>::type;

  // Delegates to the non-const simplification; the const_cast is safe because
  // the result is immediately re-qualified through SimpleType/RetType.
  static RetType getSimplifiedValue(const From& Val) {
    return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
  }
};
51 | |
// The core of the implementation of isa<X> is here; To and From should be
// the names of classes. This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
template <typename To, typename From, typename Enabler = void>
struct isa_impl {
  // Default: ask the target class via its classof() predicate.
  static inline bool doit(const From &Val) {
    return To::classof(&Val);
  }
};

/// Always allow upcasts, and perform no dynamic check for them.
template <typename To, typename From>
struct isa_impl<
    To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
  static inline bool doit(const From &) { return true; }
};

// isa_impl_cl strips one layer of wrapping (const, pointer, unique_ptr) from
// the From type and forwards the dereferenced value to isa_impl. The pointer
// forms reject null with an assert.
template <typename To, typename From> struct isa_impl_cl {
  static inline bool doit(const From &Val) {
    return isa_impl<To, From>::doit(Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From> {
  static inline bool doit(const From &Val) {
    return isa_impl<To, From>::doit(Val);
  }
};

template <typename To, typename From>
struct isa_impl_cl<To, const std::unique_ptr<From>> {
  static inline bool doit(const std::unique_ptr<From> &Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl_cl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, From*> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, From*const> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From*> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};
116 | |
// isa_impl_wrap recursively applies simplify_type until From reaches its
// fully-simplified form, then dispatches to isa_impl_cl.
template<typename To, typename From, typename SimpleFrom>
struct isa_impl_wrap {
  // When From != SimplifiedType, we can simplify the type some more by using
  // the simplify_type template.
  static bool doit(const From &Val) {
    return isa_impl_wrap<To, SimpleFrom,
      typename simplify_type<SimpleFrom>::SimpleType>::doit(
                      simplify_type<const From>::getSimplifiedValue(Val));
  }
};

// Base case of the recursion: the type no longer simplifies.
template<typename To, typename FromTy>
struct isa_impl_wrap<To, FromTy, FromTy> {
  // When From == SimpleType, we are as simple as we are going to get.
  static bool doit(const FromTy &Val) {
    return isa_impl_cl<To,FromTy>::doit(Val);
  }
};
135 | |
136 | // isa<X> - Return true if the parameter to the template is an instance of the |
137 | // template type argument. Used like this: |
138 | // |
139 | // if (isa<Type>(myVal)) { ... } |
140 | // |
141 | template <class X, class Y> LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isa(const Y &Val) { |
142 | return isa_impl_wrap<X, const Y, |
143 | typename simplify_type<const Y>::SimpleType>::doit(Val); |
144 | } |
145 | |
146 | // isa_and_nonnull<X> - Functionally identical to isa, except that a null value |
147 | // is accepted. |
148 | // |
149 | template <class X, class Y> |
150 | LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isa_and_nonnull(const Y &Val) { |
151 | if (!Val) |
152 | return false; |
153 | return isa<X>(Val); |
154 | } |
155 | |
//===----------------------------------------------------------------------===//
// cast<x> Support Templates
//===----------------------------------------------------------------------===//

// Forward declaration; defined below in terms of cast_retty_wrap.
template<class To, class From> struct cast_retty;

// Calculate what type the 'cast' function should return, based on a requested
// type of To and a source type of From.
template<class To, class From> struct cast_retty_impl {
  using ret_type = To &;       // Normal case, return Ty&
};
template<class To, class From> struct cast_retty_impl<To, const From> {
  using ret_type = const To &; // Normal case, return Ty&
};

template<class To, class From> struct cast_retty_impl<To, From*> {
  using ret_type = To *;       // Pointer arg case, return Ty*
};

template<class To, class From> struct cast_retty_impl<To, const From*> {
  using ret_type = const To *; // Constant pointer arg case, return const Ty*
};

template<class To, class From> struct cast_retty_impl<To, const From*const> {
  using ret_type = const To *; // Constant pointer arg case, return const Ty*
};

// unique_ptr case: casting a unique_ptr<From> yields a unique_ptr of the
// destination type (ownership is transferred by the rvalue cast overload).
template <class To, class From>
struct cast_retty_impl<To, std::unique_ptr<From>> {
private:
  using PointerType = typename cast_retty_impl<To, From *>::ret_type;
  using ResultType = typename std::remove_pointer<PointerType>::type;

public:
  using ret_type = std::unique_ptr<ResultType>;
};
192 | |
// cast_retty_wrap simplifies the source type first, then applies
// cast_retty_impl to the simplified form.
template<class To, class From, class SimpleFrom>
struct cast_retty_wrap {
  // When the simplified type and the from type are not the same, use the type
  // simplifier to reduce the type, then reuse cast_retty_impl to get the
  // resultant type.
  using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
};

template<class To, class FromTy>
struct cast_retty_wrap<To, FromTy, FromTy> {
  // When the simplified type is equal to the from type, use it directly.
  using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
};

// Public entry point: the type that cast<To>(From) returns.
template<class To, class From>
struct cast_retty {
  using ret_type = typename cast_retty_wrap<
      To, From, typename simplify_type<From>::SimpleType>::ret_type;
};
212 | |
// Ensure the non-simple values are converted using the simplify_type template
// that may be specialized by smart pointers...
//
template<class To, class From, class SimpleFrom> struct cast_convert_val {
  // This is not a simple type, use the template to simplify it...
  static typename cast_retty<To, From>::ret_type doit(From &Val) {
    return cast_convert_val<To, SimpleFrom,
      typename simplify_type<SimpleFrom>::SimpleType>::doit(
                          simplify_type<From>::getSimplifiedValue(Val));
  }
};

template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
  // This _is_ a simple type, just cast it. The functional-style cast handles
  // both the reference and pointer ret_type forms.
  static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
    typename cast_retty<To, FromTy>::ret_type Res2
     = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
    return Res2;
  }
};

// is_simple_type<X>: true when simplify_type leaves X unchanged.
template <class X> struct is_simple_type {
  static const bool value =
      std::is_same<X, typename simplify_type<X>::SimpleType>::value;
};
238 | |
239 | // cast<X> - Return the argument parameter cast to the specified type. This |
240 | // casting operator asserts that the type is correct, so it does not return null |
241 | // on failure. It does not allow a null argument (use cast_or_null for that). |
242 | // It is typically used like this: |
243 | // |
244 | // cast<Instruction>(myVal)->getParent() |
245 | // |
246 | template <class X, class Y> |
247 | inline typename std::enable_if<!is_simple_type<Y>::value, |
248 | typename cast_retty<X, const Y>::ret_type>::type |
249 | cast(const Y &Val) { |
250 | assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 250, __PRETTY_FUNCTION__)); |
251 | return cast_convert_val< |
252 | X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val); |
253 | } |
254 | |
255 | template <class X, class Y> |
256 | inline typename cast_retty<X, Y>::ret_type cast(Y &Val) { |
257 | assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 257, __PRETTY_FUNCTION__)); |
258 | return cast_convert_val<X, Y, |
259 | typename simplify_type<Y>::SimpleType>::doit(Val); |
260 | } |
261 | |
262 | template <class X, class Y> |
263 | inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) { |
264 | assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 264, __PRETTY_FUNCTION__)); |
265 | return cast_convert_val<X, Y*, |
266 | typename simplify_type<Y*>::SimpleType>::doit(Val); |
267 | } |
268 | |
269 | template <class X, class Y> |
270 | inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type |
271 | cast(std::unique_ptr<Y> &&Val) { |
272 | assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val.get()) && \"cast<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 272, __PRETTY_FUNCTION__)); |
273 | using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type; |
274 | return ret_type( |
275 | cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit( |
276 | Val.release())); |
277 | } |
278 | |
279 | // cast_or_null<X> - Functionally identical to cast, except that a null value is |
280 | // accepted. |
281 | // |
282 | template <class X, class Y> |
283 | LLVM_NODISCARD[[clang::warn_unused_result]] inline |
284 | typename std::enable_if<!is_simple_type<Y>::value, |
285 | typename cast_retty<X, const Y>::ret_type>::type |
286 | cast_or_null(const Y &Val) { |
287 | if (!Val) |
288 | return nullptr; |
289 | assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 289, __PRETTY_FUNCTION__)); |
290 | return cast<X>(Val); |
291 | } |
292 | |
293 | template <class X, class Y> |
294 | LLVM_NODISCARD[[clang::warn_unused_result]] inline |
295 | typename std::enable_if<!is_simple_type<Y>::value, |
296 | typename cast_retty<X, Y>::ret_type>::type |
297 | cast_or_null(Y &Val) { |
298 | if (!Val) |
299 | return nullptr; |
300 | assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 300, __PRETTY_FUNCTION__)); |
301 | return cast<X>(Val); |
302 | } |
303 | |
304 | template <class X, class Y> |
305 | LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type |
306 | cast_or_null(Y *Val) { |
307 | if (!Val) return nullptr; |
308 | assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!" ) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/include/llvm/Support/Casting.h" , 308, __PRETTY_FUNCTION__)); |
309 | return cast<X>(Val); |
310 | } |
311 | |
312 | template <class X, class Y> |
313 | inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type |
314 | cast_or_null(std::unique_ptr<Y> &&Val) { |
315 | if (!Val) |
316 | return nullptr; |
317 | return cast<X>(std::move(Val)); |
318 | } |
319 | |
320 | // dyn_cast<X> - Return the argument parameter cast to the specified type. This |
321 | // casting operator returns null if the argument is of the wrong type, so it can |
322 | // be used to test for a type as well as cast if successful. This should be |
323 | // used in the context of an if statement like this: |
324 | // |
325 | // if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... } |
326 | // |
327 | |
328 | template <class X, class Y> |
329 | LLVM_NODISCARD[[clang::warn_unused_result]] inline |
330 | typename std::enable_if<!is_simple_type<Y>::value, |
331 | typename cast_retty<X, const Y>::ret_type>::type |
332 | dyn_cast(const Y &Val) { |
333 | return isa<X>(Val) ? cast<X>(Val) : nullptr; |
334 | } |
335 | |
336 | template <class X, class Y> |
337 | LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) { |
338 | return isa<X>(Val) ? cast<X>(Val) : nullptr; |
339 | } |
340 | |
341 | template <class X, class Y> |
342 | LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) { |
343 | return isa<X>(Val) ? cast<X>(Val) : nullptr; |
344 | } |
345 | |
346 | // dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null |
347 | // value is accepted. |
348 | // |
349 | template <class X, class Y> |
350 | LLVM_NODISCARD[[clang::warn_unused_result]] inline |
351 | typename std::enable_if<!is_simple_type<Y>::value, |
352 | typename cast_retty<X, const Y>::ret_type>::type |
353 | dyn_cast_or_null(const Y &Val) { |
354 | return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr; |
355 | } |
356 | |
357 | template <class X, class Y> |
358 | LLVM_NODISCARD[[clang::warn_unused_result]] inline |
359 | typename std::enable_if<!is_simple_type<Y>::value, |
360 | typename cast_retty<X, Y>::ret_type>::type |
361 | dyn_cast_or_null(Y &Val) { |
362 | return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr; |
363 | } |
364 | |
365 | template <class X, class Y> |
366 | LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type |
367 | dyn_cast_or_null(Y *Val) { |
368 | return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr; |
369 | } |
370 | |
371 | // unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>, |
372 | // taking ownership of the input pointer iff isa<X>(Val) is true. If the |
373 | // cast is successful, From refers to nullptr on exit and the casted value |
374 | // is returned. If the cast is unsuccessful, the function returns nullptr |
375 | // and From is unchanged. |
376 | template <class X, class Y> |
377 | LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast(std::unique_ptr<Y> &Val) |
378 | -> decltype(cast<X>(Val)) { |
379 | if (!isa<X>(Val)) |
380 | return nullptr; |
381 | return cast<X>(std::move(Val)); |
382 | } |
383 | |
384 | template <class X, class Y> |
385 | LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val) |
386 | -> decltype(cast<X>(Val)) { |
387 | return unique_dyn_cast<X, Y>(Val); |
388 | } |
389 | |
390 | // dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast, except that |
391 | // a null value is accepted. |
392 | template <class X, class Y> |
393 | LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val) |
394 | -> decltype(cast<X>(Val)) { |
395 | if (!Val) |
396 | return nullptr; |
397 | return unique_dyn_cast<X, Y>(Val); |
398 | } |
399 | |
400 | template <class X, class Y> |
401 | LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val) |
402 | -> decltype(cast<X>(Val)) { |
403 | return unique_dyn_cast_or_null<X, Y>(Val); |
404 | } |
405 | |
406 | } // end namespace llvm |
407 | |
408 | #endif // LLVM_SUPPORT_CASTING_H |