Bug Summary

File: llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
Warning: line 481, column 14
Called C++ object pointer is null
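
The path below reduces to: 'F' is initialized from BB->getParent(), the scan loop
exits when CI->getCalledFunction() == F, the analyzer assumes that shared pointer
value is null (which would correspond to an indirect call, where getCalledFunction()
returns null), and 'F' is then dereferenced at line 481 via F->getEntryBlock().
Below is a self-contained analogue of that shape (invented names, not LLVM code)
that should draw the same class of warning from clang --analyze:

struct Block;
struct Func {
  Block *entryBlock();          // analogue of Function::getEntryBlock()
};

bool  isDirectCall(Block *BB);  // opaque helpers, declarations only
Func *directCallee(Block *BB);
Func *parentOf(Block *BB);      // analogue of BasicBlock::getParent()

// Analogue of CallBase::getCalledFunction(): one path visibly returns null,
// which is what lets the analyzer constrain the compared pointer to null.
static Func *calleeOf(Block *BB) {
  return isDirectCall(BB) ? directCallee(BB) : nullptr;
}

Block *candidate(Block *BB) {
  Func *F = parentOf(BB);       // (21) 'F' initialized here
  if (calleeOf(BB) == F)        // (31/32) condition assumed true, value assumed null
    return F->entryBlock();     // (40) Called C++ object pointer is null
  return nullptr;
}

In the pass itself, F comes from BB->getParent() for a block the pass is already
iterating inside its function, so it should not be null in well-formed IR; the
analyzer simply cannot see that invariant, and a null check or assert on F (or on
CI->getCalledFunction()) ahead of the comparison is the usual way such a report is
silenced.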

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TailRecursionElimination.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp

1//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file transforms calls of the current function (self recursion) followed
10// by a return instruction with a branch to the entry of the function, creating
11// a loop. This pass also implements the following extensions to the basic
12// algorithm:
13//
14// 1. Trivial instructions between the call and return do not prevent the
15// transformation from taking place, though currently the analysis cannot
16// support moving any really useful instructions (only dead ones).
17// 2. This pass transforms functions that are prevented from being tail
18// recursive by an associative and commutative expression to use an
19// accumulator variable, thus compiling the typical naive factorial or
20// 'fib' implementation into efficient code.
21// 3. TRE is performed if the function returns void, if the return
22// returns the result returned by the call, or if the function returns a
23// run-time constant on all exits from the function. It is possible, though
24// unlikely, that the return returns something else (like constant 0), and
25// can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
26// the function return the exact same value.
27// 4. If it can prove that callees do not access their caller stack frame,
28// they are marked as eligible for tail call elimination (by the code
29// generator).
30//
31// There are several improvements that could be made:
32//
33// 1. If the function has any alloca instructions, these instructions will be
34// moved out of the entry block of the function, causing them to be
35// evaluated each time through the tail recursion. Safely keeping allocas
36// in the entry block requires analysis to prove that the tail-called
37// function does not read or write the stack object.
38// 2. Tail recursion is only performed if the call immediately precedes the
39// return instruction. It's possible that there could be a jump between
40// the call and the return.
41// 3. There can be intervening operations between the call and the return that
42// prevent the TRE from occurring. For example, there could be GEP's and
43// stores to memory that will not be read or written by the call. This
44// requires some substantial analysis (such as with DSA) to prove safe to
45// move ahead of the call, but doing so could allow many more TREs to be
46// performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
47// 4. The algorithm we use to detect if callees access their caller stack
48// frames is very primitive.
49//
50//===----------------------------------------------------------------------===//
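
To make extension 2 above concrete, here is a minimal sketch of what the accumulator
transformation achieves on a naive factorial (editorial illustration, not part of
this file; the pass works on LLVM IR, and the loop form below is only the conceptual
equivalent):

// Before: the multiply after the recursive call blocks plain tail recursion.
static long fact(long n) {
  if (n <= 1)
    return 1;
  return n * fact(n - 1);
}

// After, conceptually: the pass seeds an accumulator PHI with the common
// return value (1) and branches back to the entry block, equivalent to:
static long factTRE(long n) {
  long Accumulator = 1;
  for (; n > 1; --n)
    Accumulator *= n;   // the associative/commutative op now feeds the PHI
  return Accumulator;
}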
51
52#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
53#include "llvm/ADT/STLExtras.h"
54#include "llvm/ADT/SmallPtrSet.h"
55#include "llvm/ADT/Statistic.h"
56#include "llvm/Analysis/CFG.h"
57#include "llvm/Analysis/CaptureTracking.h"
58#include "llvm/Analysis/DomTreeUpdater.h"
59#include "llvm/Analysis/GlobalsModRef.h"
60#include "llvm/Analysis/InlineCost.h"
61#include "llvm/Analysis/InstructionSimplify.h"
62#include "llvm/Analysis/Loads.h"
63#include "llvm/Analysis/OptimizationRemarkEmitter.h"
64#include "llvm/Analysis/PostDominators.h"
65#include "llvm/Analysis/TargetTransformInfo.h"
66#include "llvm/IR/CFG.h"
67#include "llvm/IR/CallSite.h"
68#include "llvm/IR/Constants.h"
69#include "llvm/IR/DataLayout.h"
70#include "llvm/IR/DerivedTypes.h"
71#include "llvm/IR/DiagnosticInfo.h"
72#include "llvm/IR/Dominators.h"
73#include "llvm/IR/Function.h"
74#include "llvm/IR/InstIterator.h"
75#include "llvm/IR/Instructions.h"
76#include "llvm/IR/IntrinsicInst.h"
77#include "llvm/IR/Module.h"
78#include "llvm/IR/ValueHandle.h"
79#include "llvm/InitializePasses.h"
80#include "llvm/Pass.h"
81#include "llvm/Support/Debug.h"
82#include "llvm/Support/raw_ostream.h"
83#include "llvm/Transforms/Scalar.h"
84#include "llvm/Transforms/Utils/BasicBlockUtils.h"
85using namespace llvm;
86
87#define DEBUG_TYPE "tailcallelim"
88
89STATISTIC(NumEliminated, "Number of tail calls removed");
90STATISTIC(NumRetDuped, "Number of return duplicated");
91STATISTIC(NumAccumAdded, "Number of accumulators introduced");
92
93/// Scan the specified function for alloca instructions.
94/// If it contains any dynamic allocas, returns false.
95static bool canTRE(Function &F) {
96 // Because of PR962, we don't TRE dynamic allocas.
97 return llvm::all_of(instructions(F), [](Instruction &I) {
98 auto *AI = dyn_cast<AllocaInst>(&I);
99 return !AI || AI->isStaticAlloca();
100 });
101}
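
For illustration (not from this file): a fixed-size local lowers to a static alloca
in the entry block and passes the check above, while a runtime-sized allocation such
as the __builtin_alloca extension produces a dynamic alloca and makes canTRE()
return false (the PR962 situation), because looping back to the entry block would
re-evaluate it on every iteration:

void keepsTRE(void (*Sink)(char *)) {
  char Buf[64];                 // static alloca: constant size, entry block
  Sink(Buf);
}

void blocksTRE(unsigned N, void (*Sink)(char *)) {
  char *Buf = static_cast<char *>(__builtin_alloca(N));  // dynamic alloca
  Sink(Buf);
}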
102
103namespace {
104struct AllocaDerivedValueTracker {
105 // Start at a root value and walk its use-def chain to mark calls that use the
106 // value or a derived value in AllocaUsers, and places where it may escape in
107 // EscapePoints.
108 void walk(Value *Root) {
109 SmallVector<Use *, 32> Worklist;
110 SmallPtrSet<Use *, 32> Visited;
111
112 auto AddUsesToWorklist = [&](Value *V) {
113 for (auto &U : V->uses()) {
114 if (!Visited.insert(&U).second)
115 continue;
116 Worklist.push_back(&U);
117 }
118 };
119
120 AddUsesToWorklist(Root);
121
122 while (!Worklist.empty()) {
123 Use *U = Worklist.pop_back_val();
124 Instruction *I = cast<Instruction>(U->getUser());
125
126 switch (I->getOpcode()) {
127 case Instruction::Call:
128 case Instruction::Invoke: {
129 CallSite CS(I);
130 // If the alloca-derived argument is passed byval it is not an escape
131 // point, or a use of an alloca. Calling with byval copies the contents
132 // of the alloca into argument registers or stack slots, which exist
133 // beyond the lifetime of the current frame.
134 if (CS.isArgOperand(U) && CS.isByValArgument(CS.getArgumentNo(U)))
135 continue;
136 bool IsNocapture =
137 CS.isDataOperand(U) && CS.doesNotCapture(CS.getDataOperandNo(U));
138 callUsesLocalStack(CS, IsNocapture);
139 if (IsNocapture) {
140 // If the alloca-derived argument is passed in as nocapture, then it
141 // can't propagate to the call's return. That would be capturing.
142 continue;
143 }
144 break;
145 }
146 case Instruction::Load: {
147 // The result of a load is not alloca-derived (unless an alloca has
148 // otherwise escaped, but this is a local analysis).
149 continue;
150 }
151 case Instruction::Store: {
152 if (U->getOperandNo() == 0)
153 EscapePoints.insert(I);
154 continue; // Stores have no users to analyze.
155 }
156 case Instruction::BitCast:
157 case Instruction::GetElementPtr:
158 case Instruction::PHI:
159 case Instruction::Select:
160 case Instruction::AddrSpaceCast:
161 break;
162 default:
163 EscapePoints.insert(I);
164 break;
165 }
166
167 AddUsesToWorklist(I);
168 }
169 }
170
171 void callUsesLocalStack(CallSite CS, bool IsNocapture) {
172 // Add it to the list of alloca users.
173 AllocaUsers.insert(CS.getInstruction());
174
175 // If it's nocapture then it can't capture this alloca.
176 if (IsNocapture)
177 return;
178
179 // If it can write to memory, it can leak the alloca value.
180 if (!CS.onlyReadsMemory())
181 EscapePoints.insert(CS.getInstruction());
182 }
183
184 SmallPtrSet<Instruction *, 32> AllocaUsers;
185 SmallPtrSet<Instruction *, 32> EscapePoints;
186};
187}
188
189static bool markTails(Function &F, bool &AllCallsAreTailCalls,
190 OptimizationRemarkEmitter *ORE) {
191 if (F.callsFunctionThatReturnsTwice())
192 return false;
193 AllCallsAreTailCalls = true;
194
195 // The local stack holds all alloca instructions and all byval arguments.
196 AllocaDerivedValueTracker Tracker;
197 for (Argument &Arg : F.args()) {
198 if (Arg.hasByValAttr())
199 Tracker.walk(&Arg);
200 }
201 for (auto &BB : F) {
202 for (auto &I : BB)
203 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
204 Tracker.walk(AI);
205 }
206
207 bool Modified = false;
208
209 // Track whether a block is reachable after an alloca has escaped. Blocks that
210 // contain the escaping instruction will be marked as being visited without an
211 // escaped alloca, since that is how the block began.
212 enum VisitType {
213 UNVISITED,
214 UNESCAPED,
215 ESCAPED
216 };
217 DenseMap<BasicBlock *, VisitType> Visited;
218
219 // We propagate the fact that an alloca has escaped from block to successor.
220 // Visit the blocks that are propagating the escapedness first. To do this, we
221 // maintain two worklists.
222 SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;
223
224 // We may enter a block and visit it thinking that no alloca has escaped yet,
225 // then see an escape point and go back around a loop edge and come back to
226 // the same block twice. Because of this, we defer setting tail on calls when
227 // we first encounter them in a block. Every entry in this list does not
228 // statically use an alloca via use-def chain analysis, but may find an alloca
229 // through other means if the block turns out to be reachable after an escape
230 // point.
231 SmallVector<CallInst *, 32> DeferredTails;
232
233 BasicBlock *BB = &F.getEntryBlock();
234 VisitType Escaped = UNESCAPED;
235 do {
236 for (auto &I : *BB) {
237 if (Tracker.EscapePoints.count(&I))
238 Escaped = ESCAPED;
239
240 CallInst *CI = dyn_cast<CallInst>(&I);
241 if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I))
242 continue;
243
244 bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles();
245
246 if (!IsNoTail && CI->doesNotAccessMemory()) {
247 // A call to a readnone function whose arguments are all things computed
248 // outside this function can be marked tail. Even if you stored the
249 // alloca address into a global, a readnone function can't load the
250 // global anyhow.
251 //
252 // Note that this runs whether we know an alloca has escaped or not. If
253 // it has, then we can't trust Tracker.AllocaUsers to be accurate.
254 bool SafeToTail = true;
255 for (auto &Arg : CI->arg_operands()) {
256 if (isa<Constant>(Arg.getUser()))
257 continue;
258 if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
259 if (!A->hasByValAttr())
260 continue;
261 SafeToTail = false;
262 break;
263 }
264 if (SafeToTail) {
265 using namespace ore;
266 ORE->emit([&]() {
267 return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
268 << "marked as tail call candidate (readnone)";
269 });
270 CI->setTailCall();
271 Modified = true;
272 continue;
273 }
274 }
275
276 if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) {
277 DeferredTails.push_back(CI);
278 } else {
279 AllCallsAreTailCalls = false;
280 }
281 }
282
283 for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) {
284 auto &State = Visited[SuccBB];
285 if (State < Escaped) {
286 State = Escaped;
287 if (State == ESCAPED)
288 WorklistEscaped.push_back(SuccBB);
289 else
290 WorklistUnescaped.push_back(SuccBB);
291 }
292 }
293
294 if (!WorklistEscaped.empty()) {
295 BB = WorklistEscaped.pop_back_val();
296 Escaped = ESCAPED;
297 } else {
298 BB = nullptr;
299 while (!WorklistUnescaped.empty()) {
300 auto *NextBB = WorklistUnescaped.pop_back_val();
301 if (Visited[NextBB] == UNESCAPED) {
302 BB = NextBB;
303 Escaped = UNESCAPED;
304 break;
305 }
306 }
307 }
308 } while (BB);
309
310 for (CallInst *CI : DeferredTails) {
311 if (Visited[CI->getParent()] != ESCAPED) {
312 // If the escape point was part way through the block, calls after the
313 // escape point wouldn't have been put into DeferredTails.
314 LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
315 CI->setTailCall();
316 Modified = true;
317 } else {
318 AllCallsAreTailCalls = false;
319 }
320 }
321
322 return Modified;
323}
324
325/// Return true if it is safe to move the specified
326/// instruction from after the call to before the call, assuming that all
327/// instructions between the call and this instruction are movable.
328///
329static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
330 // FIXME: We can move load/store/call/free instructions above the call if the
331 // call does not mod/ref the memory location being processed.
332 if (I->mayHaveSideEffects()) // This also handles volatile loads.
333 return false;
334
335 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
336 // Loads may always be moved above calls without side effects.
337 if (CI->mayHaveSideEffects()) {
338 // Non-volatile loads may be moved above a call with side effects if it
339 // does not write to memory and the load provably won't trap.
340 // Writes to memory only matter if they may alias the pointer
341 // being loaded from.
342 const DataLayout &DL = L->getModule()->getDataLayout();
343 if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
344 !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
345 MaybeAlign(L->getAlignment()), DL, L))
346 return false;
347 }
348 }
349
350 // Otherwise, if this is a side-effect free instruction, check to make sure
351 // that it does not use the return value of the call. If it doesn't use the
352 // return value of the call, it must only use things that are defined before
353 // the call, or movable instructions between the call and the instruction
354 // itself.
355 return !is_contained(I->operands(), CI);
356}
357
358/// Return true if the specified value is the same when the return would exit
359/// as it was when the initial iteration of the recursive function was executed.
360///
361/// We currently handle static constants and arguments that are not modified as
362/// part of the recursion.
363static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
364 if (isa<Constant>(V)) return true; // Static constants are always dyn consts
365
366 // Check to see if this is an immutable argument, if so, the value
367 // will be available to initialize the accumulator.
368 if (Argument *Arg = dyn_cast<Argument>(V)) {
369 // Figure out which argument number this is...
370 unsigned ArgNo = 0;
371 Function *F = CI->getParent()->getParent();
372 for (Function::arg_iterator AI = F->arg_begin(); &*AI != Arg; ++AI)
373 ++ArgNo;
374
375 // If we are passing this argument into call as the corresponding
376 // argument operand, then the argument is dynamically constant.
377 // Otherwise, we cannot transform this function safely.
378 if (CI->getArgOperand(ArgNo) == Arg)
379 return true;
380 }
381
382 // Switch cases are always constant integers. If the value is being switched
383 // on and the return is only reachable from one of its cases, it's
384 // effectively constant.
385 if (BasicBlock *UniquePred = RI->getParent()->getUniquePredecessor())
386 if (SwitchInst *SI = dyn_cast<SwitchInst>(UniquePred->getTerminator()))
387 if (SI->getCondition() == V)
388 return SI->getDefaultDest() != RI->getParent();
389
390 // Not a constant or immutable argument, we can't safely transform.
391 return false;
392}
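
A small editorial example of the "immutable argument" case handled above: 'Limit' is
forwarded unchanged as the same argument operand of the recursive call, so at any
return it still holds the value from the initial invocation, and isDynamicConstant()
can treat it as a dynamic constant:

static int countUpTo(int N, int Limit) {
  if (N >= Limit)
    return Limit;                 // returns the immutable argument
  return countUpTo(N + 1, Limit); // Limit passed through in the same position
}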
393
394/// Check to see if the function containing the specified tail call consistently
395/// returns the same runtime-constant value at all exit points except for
396/// IgnoreRI. If so, return the returned value.
397static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
398 Function *F = CI->getParent()->getParent();
399 Value *ReturnedValue = nullptr;
400
401 for (BasicBlock &BBI : *F) {
402 ReturnInst *RI = dyn_cast<ReturnInst>(BBI.getTerminator());
403 if (RI == nullptr || RI == IgnoreRI) continue;
404
405 // We can only perform this transformation if the value returned is
406 // evaluatable at the start of the initial invocation of the function,
407 // instead of at the end of the evaluation.
408 //
409 Value *RetOp = RI->getOperand(0);
410 if (!isDynamicConstant(RetOp, CI, RI))
411 return nullptr;
412
413 if (ReturnedValue && RetOp != ReturnedValue)
414 return nullptr; // Cannot transform if differing values are returned.
415 ReturnedValue = RetOp;
416 }
417 return ReturnedValue;
418}
419
420/// If the specified instruction can be transformed using accumulator recursion
421/// elimination, return the constant which is the start of the accumulator
422/// value. Otherwise return null.
423static Value *canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) {
424 if (!I->isAssociative() || !I->isCommutative()) return nullptr;
425 assert(I->getNumOperands() == 2 &&
426 "Associative/commutative operations should have 2 args!");
427
428 // Exactly one operand should be the result of the call instruction.
429 if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
430 (I->getOperand(0) != CI && I->getOperand(1) != CI))
431 return nullptr;
432
433 // The only user of this instruction we allow is a single return instruction.
434 if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
435 return nullptr;
436
437 // Ok, now we have to check all of the other return instructions in this
438 // function. If they return non-constants or differing values, then we cannot
439 // transform the function safely.
440 return getCommonReturnValue(cast<ReturnInst>(I->user_back()), CI);
441}
442
443static Instruction *firstNonDbg(BasicBlock::iterator I) {
444 while (isa<DbgInfoIntrinsic>(I))
445 ++I;
446 return &*I;
447}
448
449static CallInst *findTRECandidate(Instruction *TI,
450 bool CannotTailCallElimCallsMarkedTail,
451 const TargetTransformInfo *TTI) {
452 BasicBlock *BB = TI->getParent();
453 Function *F = BB->getParent();
21
'F' initialized here
454
455 if (&BB->front() == TI) // Make sure there is something before the terminator.
22
Assuming the condition is false
23
Taking false branch
456 return nullptr;
457
458 // Scan backwards from the return, checking to see if there is a tail call in
459 // this block. If so, set CI to it.
460 CallInst *CI = nullptr;
461 BasicBlock::iterator BBI(TI);
462 while (true) {
24
Loop condition is true. Entering loop body
463 CI = dyn_cast<CallInst>(BBI);
25
Calling 'dyn_cast<llvm::CallInst, llvm::ilist_iterator<llvm::ilist_detail::node_options<llvm::Instruction, true, false, void>, false, false>>'
29
Returning from 'dyn_cast<llvm::CallInst, llvm::ilist_iterator<llvm::ilist_detail::node_options<llvm::Instruction, true, false, void>, false, false>>'
464 if (CI && CI->getCalledFunction() == F)
30
Assuming 'CI' is non-null
31
Assuming the condition is true
32
Assuming pointer value is null
33
Taking true branch
465 break;
34
Execution continues on line 474
466
467 if (BBI == BB->begin())
468 return nullptr; // Didn't find a potential tail call.
469 --BBI;
470 }
471
472 // If this call is marked as a tail call, and if there are dynamic allocas in
473 // the function, we cannot perform this optimization.
474 if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
38.1
'CannotTailCallElimCallsMarkedTail' is false
35
Calling 'CallInst::isTailCall'
38
Returning from 'CallInst::isTailCall'
39
Taking false branch
475 return nullptr;
476
477 // As a special case, detect code like this:
478 // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
479 // and disable this xform in this case, because the code generator will
480 // lower the call to fabs into inline code.
481 if (BB == &F->getEntryBlock() &&
40
Called C++ object pointer is null
482 firstNonDbg(BB->front().getIterator()) == CI &&
483 firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
484 !TTI->isLoweredToCall(CI->getCalledFunction())) {
485 // A single-block function with just a call and a return. Check that
486 // the arguments match.
487 CallSite::arg_iterator I = CallSite(CI).arg_begin(),
488 E = CallSite(CI).arg_end();
489 Function::arg_iterator FI = F->arg_begin(),
490 FE = F->arg_end();
491 for (; I != E && FI != FE; ++I, ++FI)
492 if (*I != &*FI) break;
493 if (I == E && FI == FE)
494 return nullptr;
495 }
496
497 return CI;
498}
499
500static bool eliminateRecursiveTailCall(
501 CallInst *CI, ReturnInst *Ret, BasicBlock *&OldEntry,
502 bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs,
503 AliasAnalysis *AA, OptimizationRemarkEmitter *ORE, DomTreeUpdater &DTU) {
504 // If we are introducing accumulator recursion to eliminate operations after
505 // the call instruction that are both associative and commutative, the initial
506 // value for the accumulator is placed in this variable. If this value is set
507 // then we actually perform accumulator recursion elimination instead of
508 // simple tail recursion elimination. If the operation is an LLVM instruction
509 // (eg: "add") then it is recorded in AccumulatorRecursionInstr. If not, then
510 // we are handling the case when the return instruction returns a constant C
511 // which is different to the constant returned by other return instructions
512 // (which is recorded in AccumulatorRecursionEliminationInitVal). This is a
513 // special case of accumulator recursion, the operation being "return C".
514 Value *AccumulatorRecursionEliminationInitVal = nullptr;
515 Instruction *AccumulatorRecursionInstr = nullptr;
516
517 // Ok, we found a potential tail call. We can currently only transform the
518 // tail call if all of the instructions between the call and the return are
519 // movable to above the call itself, leaving the call next to the return.
520 // Check that this is the case now.
521 BasicBlock::iterator BBI(CI);
522 for (++BBI; &*BBI != Ret; ++BBI) {
523 if (canMoveAboveCall(&*BBI, CI, AA))
524 continue;
525
526 // If we can't move the instruction above the call, it might be because it
527 // is an associative and commutative operation that could be transformed
528 // using accumulator recursion elimination. Check to see if this is the
529 // case, and if so, remember the initial accumulator value for later.
530 if ((AccumulatorRecursionEliminationInitVal =
531 canTransformAccumulatorRecursion(&*BBI, CI))) {
532 // Yes, this is accumulator recursion. Remember which instruction
533 // accumulates.
534 AccumulatorRecursionInstr = &*BBI;
535 } else {
536 return false; // Otherwise, we cannot eliminate the tail recursion!
537 }
538 }
539
540 // We can only transform call/return pairs that either ignore the return value
541 // of the call and return void, ignore the value of the call and return a
542 // constant, return the value returned by the tail call, or that are being
543 // accumulator recursion variable eliminated.
544 if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
545 !isa<UndefValue>(Ret->getReturnValue()) &&
546 AccumulatorRecursionEliminationInitVal == nullptr &&
547 !getCommonReturnValue(nullptr, CI)) {
548 // One case remains that we are able to handle: the current return
549 // instruction returns a constant, and all other return instructions
550 // return a different constant.
551 if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret))
552 return false; // Current return instruction does not return a constant.
553 // Check that all other return instructions return a common constant. If
554 // so, record it in AccumulatorRecursionEliminationInitVal.
555 AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI);
556 if (!AccumulatorRecursionEliminationInitVal)
557 return false;
558 }
559
560 BasicBlock *BB = Ret->getParent();
561 Function *F = BB->getParent();
562
563 using namespace ore;
564 ORE->emit([&]() {
565 return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
566 << "transforming tail recursion into loop";
567 });
568
569 // OK! We can transform this tail call. If this is the first one found,
570 // create the new entry block, allowing us to branch back to the old entry.
571 if (!OldEntry) {
572 OldEntry = &F->getEntryBlock();
573 BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
574 NewEntry->takeName(OldEntry);
575 OldEntry->setName("tailrecurse");
576 BranchInst *BI = BranchInst::Create(OldEntry, NewEntry);
577 BI->setDebugLoc(CI->getDebugLoc());
578
579 // If this tail call is marked 'tail' and if there are any allocas in the
580 // entry block, move them up to the new entry block.
581 TailCallsAreMarkedTail = CI->isTailCall();
582 if (TailCallsAreMarkedTail)
583 // Move all fixed sized allocas from OldEntry to NewEntry.
584 for (BasicBlock::iterator OEBI = OldEntry->begin(), E = OldEntry->end(),
585 NEBI = NewEntry->begin(); OEBI != E; )
586 if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
587 if (isa<ConstantInt>(AI->getArraySize()))
588 AI->moveBefore(&*NEBI);
589
590 // Now that we have created a new block, which jumps to the entry
591 // block, insert a PHI node for each argument of the function.
592 // For now, we initialize each PHI to only have the real arguments
593 // which are passed in.
594 Instruction *InsertPos = &OldEntry->front();
595 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
596 I != E; ++I) {
597 PHINode *PN = PHINode::Create(I->getType(), 2,
598 I->getName() + ".tr", InsertPos);
599 I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
600 PN->addIncoming(&*I, NewEntry);
601 ArgumentPHIs.push_back(PN);
602 }
603 // The entry block was changed from OldEntry to NewEntry.
604 // The forward DominatorTree needs to be recalculated when the EntryBB is
605 // changed. In this corner-case we recalculate the entire tree.
606 DTU.recalculate(*NewEntry->getParent());
607 }
608
609 // If this function has self recursive calls in the tail position where some
610 // are marked tail and some are not, only transform one flavor or another. We
611 // have to choose whether we move allocas in the entry block to the new entry
612 // block or not, so we can't make a good choice for both. NOTE: We could do
613 // slightly better here in the case that the function has no entry block
614 // allocas.
615 if (TailCallsAreMarkedTail && !CI->isTailCall())
616 return false;
617
618 // Ok, now that we know we have a pseudo-entry block WITH all of the
619 // required PHI nodes, add entries into the PHI node for the actual
620 // parameters passed into the tail-recursive call.
621 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
622 ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);
623
624 // If we are introducing an accumulator variable to eliminate the recursion,
625 // do so now. Note that we _know_ that no subsequent tail recursion
626 // eliminations will happen on this function because of the way the
627 // accumulator recursion predicate is set up.
628 //
629 if (AccumulatorRecursionEliminationInitVal) {
630 Instruction *AccRecInstr = AccumulatorRecursionInstr;
631 // Start by inserting a new PHI node for the accumulator.
632 pred_iterator PB = pred_begin(OldEntry), PE = pred_end(OldEntry);
633 PHINode *AccPN = PHINode::Create(
634 AccumulatorRecursionEliminationInitVal->getType(),
635 std::distance(PB, PE) + 1, "accumulator.tr", &OldEntry->front());
636
637 // Loop over all of the predecessors of the tail recursion block. For the
638 // real entry into the function we seed the PHI with the initial value,
639 // computed earlier. For any other existing branches to this block (due to
640 // other tail recursions eliminated) the accumulator is not modified.
641 // Because we haven't added the branch in the current block to OldEntry yet,
642 // it will not show up as a predecessor.
643 for (pred_iterator PI = PB; PI != PE; ++PI) {
644 BasicBlock *P = *PI;
645 if (P == &F->getEntryBlock())
646 AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P);
647 else
648 AccPN->addIncoming(AccPN, P);
649 }
650
651 if (AccRecInstr) {
652 // Add an incoming argument for the current block, which is computed by
653 // our associative and commutative accumulator instruction.
654 AccPN->addIncoming(AccRecInstr, BB);
655
656 // Next, rewrite the accumulator recursion instruction so that it does not
657 // use the result of the call anymore, instead, use the PHI node we just
658 // inserted.
659 AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
660 } else {
661 // Add an incoming argument for the current block, which is just the
662 // constant returned by the current return instruction.
663 AccPN->addIncoming(Ret->getReturnValue(), BB);
664 }
665
666 // Finally, rewrite any return instructions in the program to return the PHI
667 // node instead of the "initval" that they do currently. This loop will
668 // actually rewrite the return value we are destroying, but that's ok.
669 for (BasicBlock &BBI : *F)
670 if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI.getTerminator()))
671 RI->setOperand(0, AccPN);
672 ++NumAccumAdded;
673 }
674
675 // Now that all of the PHI nodes are in place, remove the call and
676 // ret instructions, replacing them with an unconditional branch.
677 BranchInst *NewBI = BranchInst::Create(OldEntry, Ret);
678 NewBI->setDebugLoc(CI->getDebugLoc());
679
680 BB->getInstList().erase(Ret); // Remove return.
681 BB->getInstList().erase(CI); // Remove call.
682 DTU.applyUpdates({{DominatorTree::Insert, BB, OldEntry}});
683 ++NumEliminated;
684 return true;
685}
686
687static bool foldReturnAndProcessPred(
688 BasicBlock *BB, ReturnInst *Ret, BasicBlock *&OldEntry,
689 bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs,
690 bool CannotTailCallElimCallsMarkedTail, const TargetTransformInfo *TTI,
691 AliasAnalysis *AA, OptimizationRemarkEmitter *ORE, DomTreeUpdater &DTU) {
692 bool Change = false;
693
694 // Make sure this block is a trivial return block.
695 assert(BB->getFirstNonPHIOrDbg() == Ret &&
696 "Trying to fold non-trivial return block");
697
698 // If the return block contains nothing but the return and PHI's,
699 // there might be an opportunity to duplicate the return in its
700 // predecessors and perform TRE there. Look for predecessors that end
701 // in unconditional branch and recursive call(s).
702 SmallVector<BranchInst*, 8> UncondBranchPreds;
703 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
704 BasicBlock *Pred = *PI;
705 Instruction *PTI = Pred->getTerminator();
706 if (BranchInst *BI = dyn_cast<BranchInst>(PTI))
707 if (BI->isUnconditional())
708 UncondBranchPreds.push_back(BI);
709 }
710
711 while (!UncondBranchPreds.empty()) {
712 BranchInst *BI = UncondBranchPreds.pop_back_val();
713 BasicBlock *Pred = BI->getParent();
714 if (CallInst *CI = findTRECandidate(BI, CannotTailCallElimCallsMarkedTail, TTI)){
715 LLVM_DEBUG(dbgs() << "FOLDING: " << *BB
716 << "INTO UNCOND BRANCH PRED: " << *Pred);
717 ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred, &DTU);
718
719 // Cleanup: if all predecessors of BB have been eliminated by
720 // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
721 // because the ret instruction in there is still using a value which
722 // eliminateRecursiveTailCall will attempt to remove.
723 if (!BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
724 DTU.deleteBB(BB);
725
726 eliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
727 ArgumentPHIs, AA, ORE, DTU);
728 ++NumRetDuped;
729 Change = true;
730 }
731 }
732
733 return Change;
734}
735
736static bool processReturningBlock(
737 ReturnInst *Ret, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail,
738 SmallVectorImpl<PHINode *> &ArgumentPHIs,
739 bool CannotTailCallElimCallsMarkedTail, const TargetTransformInfo *TTI,
740 AliasAnalysis *AA, OptimizationRemarkEmitter *ORE, DomTreeUpdater &DTU) {
741 CallInst *CI = findTRECandidate(Ret, CannotTailCallElimCallsMarkedTail, TTI);
20
Calling 'findTRECandidate'
742 if (!CI)
743 return false;
744
745 return eliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
746 ArgumentPHIs, AA, ORE, DTU);
747}
748
749static bool eliminateTailRecursion(Function &F, const TargetTransformInfo *TTI,
750 AliasAnalysis *AA,
751 OptimizationRemarkEmitter *ORE,
752 DomTreeUpdater &DTU) {
753 if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
2
Assuming the condition is false
3
Taking false branch
754 return false;
755
756 bool MadeChange = false;
757 bool AllCallsAreTailCalls = false;
758 MadeChange |= markTails(F, AllCallsAreTailCalls, ORE);
759 if (!AllCallsAreTailCalls)
4
Assuming 'AllCallsAreTailCalls' is true
5
Taking false branch
760 return MadeChange;
761
762 // If this function is a varargs function, we won't be able to PHI the args
763 // right, so don't even try to convert it...
764 if (F.getFunctionType()->isVarArg())
6
Calling 'FunctionType::isVarArg'
9
Returning from 'FunctionType::isVarArg'
10
Taking false branch
765 return false;
766
767 BasicBlock *OldEntry = nullptr;
768 bool TailCallsAreMarkedTail = false;
769 SmallVector<PHINode*, 8> ArgumentPHIs;
770
771 // If false, we cannot perform TRE on tail calls marked with the 'tail'
772 // attribute, because doing so would cause the stack size to increase (real
773 // TRE would deallocate variable sized allocas, TRE doesn't).
774 bool CanTRETailMarkedCall = canTRE(F);
775
776 // Change any tail recursive calls to loops.
777 //
778 // FIXME: The code generator produces really bad code when an 'escaping
779 // alloca' is changed from being a static alloca to being a dynamic alloca.
780 // Until this is resolved, disable this transformation if that would ever
781 // happen. This bug is PR962.
782 for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) {
11
Calling 'operator!='
14
Returning from 'operator!='
15
Loop condition is true. Entering loop body
783 BasicBlock *BB = &*BBI++; // foldReturnAndProcessPred may delete BB.
784 if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
16.1
'Ret' is non-null
16
Assuming the object is a 'ReturnInst'
17
Taking true branch
785 bool Change = processReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
19
Calling 'processReturningBlock'
786 ArgumentPHIs, !CanTRETailMarkedCall,
18
Assuming 'CanTRETailMarkedCall' is true
787 TTI, AA, ORE, DTU);
788 if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
789 Change = foldReturnAndProcessPred(
790 BB, Ret, OldEntry, TailCallsAreMarkedTail, ArgumentPHIs,
791 !CanTRETailMarkedCall, TTI, AA, ORE, DTU);
792 MadeChange |= Change;
793 }
794 }
795
796 // If we eliminated any tail recursions, it's possible that we inserted some
797 // silly PHI nodes which just merge an initial value (the incoming operand)
798 // with themselves. Check to see if we did and clean up our mess if so. This
799 // occurs when a function passes an argument straight through to its tail
800 // call.
801 for (PHINode *PN : ArgumentPHIs) {
802 // If the PHI Node is a dynamic constant, replace it with the value it is.
803 if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) {
804 PN->replaceAllUsesWith(PNV);
805 PN->eraseFromParent();
806 }
807 }
808
809 return MadeChange;
810}
811
812namespace {
813struct TailCallElim : public FunctionPass {
814 static char ID; // Pass identification, replacement for typeid
815 TailCallElim() : FunctionPass(ID) {
816 initializeTailCallElimPass(*PassRegistry::getPassRegistry());
817 }
818
819 void getAnalysisUsage(AnalysisUsage &AU) const override {
820 AU.addRequired<TargetTransformInfoWrapperPass>();
821 AU.addRequired<AAResultsWrapperPass>();
822 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
823 AU.addPreserved<GlobalsAAWrapperPass>();
824 AU.addPreserved<DominatorTreeWrapperPass>();
825 AU.addPreserved<PostDominatorTreeWrapperPass>();
826 }
827
828 bool runOnFunction(Function &F) override {
829 if (skipFunction(F))
830 return false;
831
832 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
833 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
834 auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
835 auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
836 // There is no noticeable performance difference here between Lazy and Eager
837 // UpdateStrategy based on some test results. It is feasible to switch the
838 // UpdateStrategy to Lazy if we find it profitable later.
839 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
840
841 return eliminateTailRecursion(
842 F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
843 &getAnalysis<AAResultsWrapperPass>().getAAResults(),
844 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
845 }
846};
847}
848
849char TailCallElim::ID = 0;
850INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
851 false, false)
852INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
853INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
854INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination",
855 false, false)
856
857// Public interface to the TailCallElimination pass
858FunctionPass *llvm::createTailCallEliminationPass() {
859 return new TailCallElim();
860}
861
862PreservedAnalyses TailCallElimPass::run(Function &F,
863 FunctionAnalysisManager &AM) {
864
865 TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
866 AliasAnalysis &AA = AM.getResult<AAManager>(F);
867 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
868 auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
869 auto *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F);
870 // There is no noticeable performance difference here between Lazy and Eager
871 // UpdateStrategy based on some test results. It is feasible to switch the
872 // UpdateStrategy to Lazy if we find it profitable later.
873 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
874 bool Changed = eliminateTailRecursion(F, &TTI, &AA, &ORE, DTU);
1
Calling 'eliminateTailRecursion'
875
876 if (!Changed)
877 return PreservedAnalyses::all();
878 PreservedAnalyses PA;
879 PA.preserve<GlobalsAA>();
880 PA.preserve<DominatorTreeAnalysis>();
881 PA.preserve<PostDominatorTreeAnalysis>();
882 return PA;
883}

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/DerivedTypes.h

1//===- llvm/DerivedTypes.h - Classes for handling data types ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declarations of classes that represent "derived
10// types". These are things like "arrays of x" or "structure of x, y, z" or
11// "function returning x taking (y,z) as parameters", etc...
12//
13// The implementations of these classes live in the Type.cpp file.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_IR_DERIVEDTYPES_H
18#define LLVM_IR_DERIVEDTYPES_H
19
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/IR/Type.h"
24#include "llvm/Support/Casting.h"
25#include "llvm/Support/Compiler.h"
26#include "llvm/Support/TypeSize.h"
27#include <cassert>
28#include <cstdint>
29
30namespace llvm {
31
32class Value;
33class APInt;
34class LLVMContext;
35
36/// Class to represent integer types. Note that this class is also used to
37/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
38/// Int64Ty.
39/// Integer representation type
40class IntegerType : public Type {
41 friend class LLVMContextImpl;
42
43protected:
44 explicit IntegerType(LLVMContext &C, unsigned NumBits) : Type(C, IntegerTyID){
45 setSubclassData(NumBits);
46 }
47
48public:
49 /// This enum is just used to hold constants we need for IntegerType.
50 enum {
51 MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified
52 MAX_INT_BITS = (1<<24)-1 ///< Maximum number of bits that can be specified
53 ///< Note that bit width is stored in the Type classes SubclassData field
54 ///< which has 24 bits. This yields a maximum bit width of 16,777,215
55 ///< bits.
56 };
57
58 /// This static method is the primary way of constructing an IntegerType.
59 /// If an IntegerType with the same NumBits value was previously instantiated,
60 /// that instance will be returned. Otherwise a new one will be created. Only
61 /// one instance with a given NumBits value is ever created.
62 /// Get or create an IntegerType instance.
63 static IntegerType *get(LLVMContext &C, unsigned NumBits);
64
65 /// Returns type twice as wide the input type.
66 IntegerType *getExtendedType() const {
67 return Type::getIntNTy(getContext(), 2 * getScalarSizeInBits());
68 }
69
70 /// Get the number of bits in this IntegerType
71 unsigned getBitWidth() const { return getSubclassData(); }
72
73 /// Return a bitmask with ones set for all of the bits that can be set by an
74 /// unsigned version of this type. This is 0xFF for i8, 0xFFFF for i16, etc.
75 uint64_t getBitMask() const {
76 return ~uint64_t(0UL) >> (64-getBitWidth());
77 }
78
79 /// Return a uint64_t with just the most significant bit set (the sign bit, if
80 /// the value is treated as a signed number).
81 uint64_t getSignBit() const {
82 return 1ULL << (getBitWidth()-1);
83 }
84
85 /// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
86 /// @returns a bit mask with ones set for all the bits of this type.
87 /// Get a bit mask for this type.
88 APInt getMask() const;
89
90 /// This method determines if the width of this IntegerType is a power-of-2
91 /// in terms of 8 bit bytes.
92 /// @returns true if this is a power-of-2 byte width.
93 /// Is this a power-of-2 byte-width IntegerType ?
94 bool isPowerOf2ByteWidth() const;
95
96 /// Methods for support type inquiry through isa, cast, and dyn_cast.
97 static bool classof(const Type *T) {
98 return T->getTypeID() == IntegerTyID;
99 }
100};
101
102unsigned Type::getIntegerBitWidth() const {
103 return cast<IntegerType>(this)->getBitWidth();
104}
105
106/// Class to represent function types
107///
108class FunctionType : public Type {
109 FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
110
111public:
112 FunctionType(const FunctionType &) = delete;
113 FunctionType &operator=(const FunctionType &) = delete;
114
115 /// This static method is the primary way of constructing a FunctionType.
116 static FunctionType *get(Type *Result,
117 ArrayRef<Type*> Params, bool isVarArg);
118
119 /// Create a FunctionType taking no parameters.
120 static FunctionType *get(Type *Result, bool isVarArg);
121
122 /// Return true if the specified type is valid as a return type.
123 static bool isValidReturnType(Type *RetTy);
124
125 /// Return true if the specified type is valid as an argument type.
126 static bool isValidArgumentType(Type *ArgTy);
127
128 bool isVarArg() const { return getSubclassData()!=0; }
7
Assuming the condition is false
8
Returning zero, which participates in a condition later
129 Type *getReturnType() const { return ContainedTys[0]; }
130
131 using param_iterator = Type::subtype_iterator;
132
133 param_iterator param_begin() const { return ContainedTys + 1; }
134 param_iterator param_end() const { return &ContainedTys[NumContainedTys]; }
135 ArrayRef<Type *> params() const {
136 return makeArrayRef(param_begin(), param_end());
137 }
138
139 /// Parameter type accessors.
140 Type *getParamType(unsigned i) const { return ContainedTys[i+1]; }
141
142 /// Return the number of fixed parameters this function type requires.
143 /// This does not consider varargs.
144 unsigned getNumParams() const { return NumContainedTys - 1; }
145
146 /// Methods for support type inquiry through isa, cast, and dyn_cast.
147 static bool classof(const Type *T) {
148 return T->getTypeID() == FunctionTyID;
149 }
150};
151static_assert(alignof(FunctionType) >= alignof(Type *),
152 "Alignment sufficient for objects appended to FunctionType");
153
154bool Type::isFunctionVarArg() const {
155 return cast<FunctionType>(this)->isVarArg();
156}
157
158Type *Type::getFunctionParamType(unsigned i) const {
159 return cast<FunctionType>(this)->getParamType(i);
160}
161
162unsigned Type::getFunctionNumParams() const {
163 return cast<FunctionType>(this)->getNumParams();
164}
165
166/// A handy container for a FunctionType+Callee-pointer pair, which can be
167/// passed around as a single entity. This assists in replacing the use of
168/// PointerType::getElementType() to access the function's type, since that's
169/// slated for removal as part of the [opaque pointer types] project.
170class FunctionCallee {
171public:
172 // Allow implicit conversion from types which have a getFunctionType member
173 // (e.g. Function and InlineAsm).
174 template <typename T, typename U = decltype(&T::getFunctionType)>
175 FunctionCallee(T *Fn)
176 : FnTy(Fn ? Fn->getFunctionType() : nullptr), Callee(Fn) {}
177
178 FunctionCallee(FunctionType *FnTy, Value *Callee)
179 : FnTy(FnTy), Callee(Callee) {
180 assert((FnTy == nullptr) == (Callee == nullptr));
181 }
182
183 FunctionCallee(std::nullptr_t) {}
184
185 FunctionCallee() = default;
186
187 FunctionType *getFunctionType() { return FnTy; }
188
189 Value *getCallee() { return Callee; }
190
191 explicit operator bool() { return Callee; }
192
193private:
194 FunctionType *FnTy = nullptr;
195 Value *Callee = nullptr;
196};
197
198/// Common super class of ArrayType, StructType and VectorType.
199class CompositeType : public Type {
200protected:
201 explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) {}
202
203public:
204 /// Given an index value into the type, return the type of the element.
205 Type *getTypeAtIndex(const Value *V) const;
206 Type *getTypeAtIndex(unsigned Idx) const;
207 bool indexValid(const Value *V) const;
208 bool indexValid(unsigned Idx) const;
209
210 /// Methods for support type inquiry through isa, cast, and dyn_cast.
211 static bool classof(const Type *T) {
212 return T->getTypeID() == ArrayTyID ||
213 T->getTypeID() == StructTyID ||
214 T->getTypeID() == VectorTyID;
215 }
216};
217
218/// Class to represent struct types. There are two different kinds of struct
219/// types: Literal structs and Identified structs.
220///
221/// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
222/// always have a body when created. You can get one of these by using one of
223/// the StructType::get() forms.
224///
225/// Identified structs (e.g. %foo or %42) may optionally have a name and are not
226/// uniqued. The names for identified structs are managed at the LLVMContext
227/// level, so there can only be a single identified struct with a given name in
228/// a particular LLVMContext. Identified structs may also optionally be opaque
229/// (have no body specified). You get one of these by using one of the
230/// StructType::create() forms.
231///
232 /// Independent of what kind of struct you have, the body of a struct type is
233/// laid out in memory consecutively with the elements directly one after the
234/// other (if the struct is packed) or (if not packed) with padding between the
235/// elements as defined by DataLayout (which is required to match what the code
236/// generator for a target expects).
237///
238class StructType : public CompositeType {
239 StructType(LLVMContext &C) : CompositeType(C, StructTyID) {}
240
241 enum {
242 /// This is the contents of the SubClassData field.
243 SCDB_HasBody = 1,
244 SCDB_Packed = 2,
245 SCDB_IsLiteral = 4,
246 SCDB_IsSized = 8
247 };
248
249 /// For a named struct that actually has a name, this is a pointer to the
250 /// symbol table entry (maintained by LLVMContext) for the struct.
251 /// This is null if the type is a literal struct or if it is an identified
252 /// type that has an empty name.
253 void *SymbolTableEntry = nullptr;
254
255public:
256 StructType(const StructType &) = delete;
257 StructType &operator=(const StructType &) = delete;
258
259 /// This creates an identified struct.
260 static StructType *create(LLVMContext &Context, StringRef Name);
261 static StructType *create(LLVMContext &Context);
262
263 static StructType *create(ArrayRef<Type *> Elements, StringRef Name,
264 bool isPacked = false);
265 static StructType *create(ArrayRef<Type *> Elements);
266 static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements,
267 StringRef Name, bool isPacked = false);
268 static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements);
269 template <class... Tys>
270 static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
271 create(StringRef Name, Type *elt1, Tys *... elts) {
272 assert(elt1 && "Cannot create a struct type with no elements with this");
273 SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
274 return create(StructFields, Name);
275 }
276
277 /// This static method is the primary way to create a literal StructType.
278 static StructType *get(LLVMContext &Context, ArrayRef<Type*> Elements,
279 bool isPacked = false);
280
281 /// Create an empty structure type.
282 static StructType *get(LLVMContext &Context, bool isPacked = false);
283
284 /// This static method is a convenience method for creating structure types by
285 /// specifying the elements as arguments. Note that this method always returns
286 /// a non-packed struct, and requires at least one element type.
287 template <class... Tys>
288 static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
289 get(Type *elt1, Tys *... elts) {
290    assert(elt1 && "Cannot create a struct type with no elements with this");
291 LLVMContext &Ctx = elt1->getContext();
292 SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
293 return llvm::StructType::get(Ctx, StructFields);
294 }
295
296 bool isPacked() const { return (getSubclassData() & SCDB_Packed) != 0; }
297
298 /// Return true if this type is uniqued by structural equivalence, false if it
299 /// is a struct definition.
300 bool isLiteral() const { return (getSubclassData() & SCDB_IsLiteral) != 0; }
301
302 /// Return true if this is a type with an identity that has no body specified
303 /// yet. These print as 'opaque' in .ll files.
304 bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
305
306 /// isSized - Return true if this is a sized type.
307 bool isSized(SmallPtrSetImpl<Type *> *Visited = nullptr) const;
308
309 /// Return true if this is a named struct that has a non-empty name.
310 bool hasName() const { return SymbolTableEntry != nullptr; }
311
312 /// Return the name for this struct type if it has an identity.
313 /// This may return an empty string for an unnamed struct type. Do not call
314 /// this on a literal type.
315 StringRef getName() const;
316
317 /// Change the name of this type to the specified name, or to a name with a
318 /// suffix if there is a collision. Do not call this on a literal type.
319 void setName(StringRef Name);
320
321 /// Specify a body for an opaque identified type.
322 void setBody(ArrayRef<Type*> Elements, bool isPacked = false);
323
324 template <typename... Tys>
325 std::enable_if_t<are_base_of<Type, Tys...>::value, void>
326 setBody(Type *elt1, Tys *... elts) {
327    assert(elt1 && "Cannot create a struct type with no elements with this");
328 SmallVector<llvm::Type *, 8> StructFields({elt1, elts...});
329 setBody(StructFields);
330 }
331
332 /// Return true if the specified type is valid as an element type.
333 static bool isValidElementType(Type *ElemTy);
334
335 // Iterator access to the elements.
336 using element_iterator = Type::subtype_iterator;
337
338 element_iterator element_begin() const { return ContainedTys; }
339 element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
340 ArrayRef<Type *> const elements() const {
341 return makeArrayRef(element_begin(), element_end());
342 }
343
344 /// Return true if this is layout identical to the specified struct.
345 bool isLayoutIdentical(StructType *Other) const;
346
347 /// Random access to the elements
348 unsigned getNumElements() const { return NumContainedTys; }
349 Type *getElementType(unsigned N) const {
350    assert(N < NumContainedTys && "Element number out of range!");
351 return ContainedTys[N];
352 }
353
354 /// Methods for support type inquiry through isa, cast, and dyn_cast.
355 static bool classof(const Type *T) {
356 return T->getTypeID() == StructTyID;
357 }
358};
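
A minimal usage sketch of the two creation paths described above, assuming only an existing LLVMContext; the function name structTypeSketch is illustrative and not part of the header:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void structTypeSketch(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  // Literal struct: uniqued structurally, must have a body when created.
  StructType *Literal = StructType::get(Ctx, {I32, I32});      // { i32, i32 }
  // Identified struct: managed by name in the LLVMContext, may start opaque.
  StructType *Named = StructType::create(Ctx, "foo");          // %foo = type opaque
  Named->setBody({I32, Literal}, /*isPacked=*/false);          // %foo = type { i32, { i32, i32 } }
}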
359
360StringRef Type::getStructName() const {
361 return cast<StructType>(this)->getName();
362}
363
364unsigned Type::getStructNumElements() const {
365 return cast<StructType>(this)->getNumElements();
366}
367
368Type *Type::getStructElementType(unsigned N) const {
369 return cast<StructType>(this)->getElementType(N);
370}
371
372/// This is the superclass of the array and vector type classes. Both of these
373/// represent "arrays" in memory. The array type represents a specifically sized
374/// array, and the vector type represents a specifically sized array that allows
375/// for use of SIMD instructions. SequentialType holds the common features of
376/// both, which stem from the fact that both lay their components out in memory
377/// identically.
378class SequentialType : public CompositeType {
379 Type *ContainedType; ///< Storage for the single contained type.
380 uint64_t NumElements;
381
382protected:
383 SequentialType(TypeID TID, Type *ElType, uint64_t NumElements)
384 : CompositeType(ElType->getContext(), TID), ContainedType(ElType),
385 NumElements(NumElements) {
386 ContainedTys = &ContainedType;
387 NumContainedTys = 1;
388 }
389
390public:
391 SequentialType(const SequentialType &) = delete;
392 SequentialType &operator=(const SequentialType &) = delete;
393
394 /// For scalable vectors, this will return the minimum number of elements
395 /// in the vector.
396 uint64_t getNumElements() const { return NumElements; }
397 Type *getElementType() const { return ContainedType; }
398
399 /// Methods for support type inquiry through isa, cast, and dyn_cast.
400 static bool classof(const Type *T) {
401 return T->getTypeID() == ArrayTyID || T->getTypeID() == VectorTyID;
402 }
403};
404
405/// Class to represent array types.
406class ArrayType : public SequentialType {
407 ArrayType(Type *ElType, uint64_t NumEl);
408
409public:
410 ArrayType(const ArrayType &) = delete;
411 ArrayType &operator=(const ArrayType &) = delete;
412
413  /// This static method is the primary way to construct an ArrayType.
414 static ArrayType *get(Type *ElementType, uint64_t NumElements);
415
416  /// Return true if the specified type is valid as an element type.
417 static bool isValidElementType(Type *ElemTy);
418
419 /// Methods for support type inquiry through isa, cast, and dyn_cast.
420 static bool classof(const Type *T) {
421 return T->getTypeID() == ArrayTyID;
422 }
423};
424
425uint64_t Type::getArrayNumElements() const {
426 return cast<ArrayType>(this)->getNumElements();
427}
428
429/// Class to represent vector types.
430class VectorType : public SequentialType {
431 /// A fully specified VectorType is of the form <vscale x n x Ty>. 'n' is the
432 /// minimum number of elements of type Ty contained within the vector, and
433 /// 'vscale x' indicates that the total element count is an integer multiple
434 /// of 'n', where the multiple is either guaranteed to be one, or is
435 /// statically unknown at compile time.
436 ///
437 /// If the multiple is known to be 1, then the extra term is discarded in
438 /// textual IR:
439 ///
440 /// <4 x i32> - a vector containing 4 i32s
441 /// <vscale x 4 x i32> - a vector containing an unknown integer multiple
442 /// of 4 i32s
443
444 VectorType(Type *ElType, unsigned NumEl, bool Scalable = false);
445 VectorType(Type *ElType, ElementCount EC);
446
447 // If true, the total number of elements is an unknown multiple of the
448 // minimum 'NumElements' from SequentialType. Otherwise the total number
449 // of elements is exactly equal to 'NumElements'.
450 bool Scalable;
451
452public:
453 VectorType(const VectorType &) = delete;
454 VectorType &operator=(const VectorType &) = delete;
455
456  /// This static method is the primary way to construct a VectorType.
457 static VectorType *get(Type *ElementType, ElementCount EC);
458 static VectorType *get(Type *ElementType, unsigned NumElements,
459 bool Scalable = false) {
460 return VectorType::get(ElementType, {NumElements, Scalable});
461 }
462
463 /// This static method gets a VectorType with the same number of elements as
464 /// the input type, and the element type is an integer type of the same width
465 /// as the input element type.
466 static VectorType *getInteger(VectorType *VTy) {
467 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
468    assert(EltBits && "Element size must be of a non-zero size");
469 Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
470 return VectorType::get(EltTy, VTy->getElementCount());
471 }
472
473 /// This static method is like getInteger except that the element types are
474 /// twice as wide as the elements in the input type.
475 static VectorType *getExtendedElementVectorType(VectorType *VTy) {
476    assert(VTy->isIntOrIntVectorTy() && "VTy expected to be a vector of ints.");
477 auto *EltTy = cast<IntegerType>(VTy->getElementType());
478 return VectorType::get(EltTy->getExtendedType(), VTy->getElementCount());
479 }
480
481 // This static method gets a VectorType with the same number of elements as
482 // the input type, and the element type is an integer or float type which
483 // is half as wide as the elements in the input type.
484 static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
485 Type *EltTy;
486 if (VTy->getElementType()->isFloatingPointTy()) {
487 switch(VTy->getElementType()->getTypeID()) {
488 case DoubleTyID:
489 EltTy = Type::getFloatTy(VTy->getContext());
490 break;
491 case FloatTyID:
492 EltTy = Type::getHalfTy(VTy->getContext());
493 break;
494 default:
495      llvm_unreachable("Cannot create narrower fp vector element type");
496 }
497 } else {
498 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
499      assert((EltBits & 1) == 0 &&
500             "Cannot truncate vector element with odd bit-width");
501 EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
502 }
503 return VectorType::get(EltTy, VTy->getElementCount());
504 }
505
506 // This static method returns a VectorType with a smaller number of elements
507 // of a larger type than the input element type. For example, a <16 x i8>
508 // subdivided twice would return <4 x i32>
509 static VectorType *getSubdividedVectorType(VectorType *VTy, int NumSubdivs) {
510 for (int i = 0; i < NumSubdivs; ++i) {
511 VTy = VectorType::getDoubleElementsVectorType(VTy);
512 VTy = VectorType::getTruncatedElementVectorType(VTy);
513 }
514 return VTy;
515 }
516
517 /// This static method returns a VectorType with half as many elements as the
518 /// input type and the same element type.
519 static VectorType *getHalfElementsVectorType(VectorType *VTy) {
520 auto EltCnt = VTy->getElementCount();
521    assert((EltCnt.Min & 1) == 0 &&
522           "Cannot halve vector with odd number of elements.");
523 return VectorType::get(VTy->getElementType(), EltCnt/2);
524 }
525
526 /// This static method returns a VectorType with twice as many elements as the
527 /// input type and the same element type.
528 static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
529 auto EltCnt = VTy->getElementCount();
530    assert((VTy->getNumElements() * 2ull) <= UINT_MAX &&
531           "Too many elements in vector");
532 return VectorType::get(VTy->getElementType(), EltCnt*2);
533 }
534
535  /// Return true if the specified type is valid as an element type.
536 static bool isValidElementType(Type *ElemTy);
537
538 /// Return an ElementCount instance to represent the (possibly scalable)
539 /// number of elements in the vector.
540 ElementCount getElementCount() const {
541 uint64_t MinimumEltCnt = getNumElements();
542    assert(MinimumEltCnt <= UINT_MAX && "Too many elements in vector");
543 return { (unsigned)MinimumEltCnt, Scalable };
544 }
545
546 /// Returns whether or not this is a scalable vector (meaning the total
547 /// element count is a multiple of the minimum).
548 bool isScalable() const {
549 return Scalable;
550 }
551
552 /// Return the minimum number of bits in the Vector type.
553 /// Returns zero when the vector is a vector of pointers.
554 unsigned getBitWidth() const {
555 return getNumElements() * getElementType()->getPrimitiveSizeInBits();
556 }
557
558 /// Methods for support type inquiry through isa, cast, and dyn_cast.
559 static bool classof(const Type *T) {
560 return T->getTypeID() == VectorTyID;
561 }
562};
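
A minimal sketch, assuming an existing LLVMContext, of the fixed-width versus scalable forms and the element-count helpers declared above; vectorTypeSketch is an illustrative name only:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void vectorTypeSketch(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  VectorType *V4  = VectorType::get(I32, 4);                      // <4 x i32>
  VectorType *SV4 = VectorType::get(I32, 4, /*Scalable=*/true);   // <vscale x 4 x i32>
  VectorType *V8  = VectorType::getDoubleElementsVectorType(V4);  // <8 x i32>
  ElementCount EC = SV4->getElementCount();                       // minimum 4, scalable
  (void)V8; (void)EC;
}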
563
564unsigned Type::getVectorNumElements() const {
565 return cast<VectorType>(this)->getNumElements();
566}
567
568bool Type::getVectorIsScalable() const {
569 return cast<VectorType>(this)->isScalable();
570}
571
572ElementCount Type::getVectorElementCount() const {
573 return cast<VectorType>(this)->getElementCount();
574}
575
576/// Class to represent pointers.
577class PointerType : public Type {
578 explicit PointerType(Type *ElType, unsigned AddrSpace);
579
580 Type *PointeeTy;
581
582public:
583 PointerType(const PointerType &) = delete;
584 PointerType &operator=(const PointerType &) = delete;
585
586 /// This constructs a pointer to an object of the specified type in a numbered
587 /// address space.
588 static PointerType *get(Type *ElementType, unsigned AddressSpace);
589
590 /// This constructs a pointer to an object of the specified type in the
591 /// generic address space (address space zero).
592 static PointerType *getUnqual(Type *ElementType) {
593 return PointerType::get(ElementType, 0);
594 }
595
596 Type *getElementType() const { return PointeeTy; }
597
598  /// Return true if the specified type is valid as an element type.
599 static bool isValidElementType(Type *ElemTy);
600
601 /// Return true if we can load or store from a pointer to this type.
602 static bool isLoadableOrStorableType(Type *ElemTy);
603
604 /// Return the address space of the Pointer type.
605 inline unsigned getAddressSpace() const { return getSubclassData(); }
606
607 /// Implement support type inquiry through isa, cast, and dyn_cast.
608 static bool classof(const Type *T) {
609 return T->getTypeID() == PointerTyID;
610 }
611};
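
A small sketch of the pointer constructors above, assuming an LLVMContext is available; pointerTypeSketch is an illustrative name:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void pointerTypeSketch(LLVMContext &Ctx) {
  Type *I8 = Type::getInt8Ty(Ctx);
  PointerType *P0 = PointerType::getUnqual(I8);   // i8* in the generic address space 0
  PointerType *P3 = PointerType::get(I8, 3);      // i8 addrspace(3)*
  unsigned AS = P3->getAddressSpace();            // 3
  (void)P0; (void)AS;
}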
612
613Type *Type::getExtendedType() const {
614  assert(
615      isIntOrIntVectorTy() &&
616      "Original type expected to be a vector of integers or a scalar integer.");
617 if (auto *VTy = dyn_cast<VectorType>(this))
618 return VectorType::getExtendedElementVectorType(
619 const_cast<VectorType *>(VTy));
620 return cast<IntegerType>(this)->getExtendedType();
621}
622
623Type *Type::getWithNewBitWidth(unsigned NewBitWidth) const {
624  assert(
625      isIntOrIntVectorTy() &&
626      "Original type expected to be a vector of integers or a scalar integer.");
627 Type *NewType = getIntNTy(getContext(), NewBitWidth);
628 if (isVectorTy())
629 NewType = VectorType::get(NewType, getVectorElementCount());
630 return NewType;
631}
632
633unsigned Type::getPointerAddressSpace() const {
634 return cast<PointerType>(getScalarType())->getAddressSpace();
635}
636
637} // end namespace llvm
638
639#endif // LLVM_IR_DERIVEDTYPES_H

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/ADT/ilist_iterator.h

1//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_ADT_ILIST_ITERATOR_H
10#define LLVM_ADT_ILIST_ITERATOR_H
11
12#include "llvm/ADT/ilist_node.h"
13#include <cassert>
14#include <cstddef>
15#include <iterator>
16#include <type_traits>
17
18namespace llvm {
19
20namespace ilist_detail {
21
22/// Find const-correct node types.
23template <class OptionsT, bool IsConst> struct IteratorTraits;
24template <class OptionsT> struct IteratorTraits<OptionsT, false> {
25 using value_type = typename OptionsT::value_type;
26 using pointer = typename OptionsT::pointer;
27 using reference = typename OptionsT::reference;
28 using node_pointer = ilist_node_impl<OptionsT> *;
29 using node_reference = ilist_node_impl<OptionsT> &;
30};
31template <class OptionsT> struct IteratorTraits<OptionsT, true> {
32 using value_type = const typename OptionsT::value_type;
33 using pointer = typename OptionsT::const_pointer;
34 using reference = typename OptionsT::const_reference;
35 using node_pointer = const ilist_node_impl<OptionsT> *;
36 using node_reference = const ilist_node_impl<OptionsT> &;
37};
38
39template <bool IsReverse> struct IteratorHelper;
40template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
41 using Access = ilist_detail::NodeAccess;
42
43 template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
44 template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
45};
46template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
47 using Access = ilist_detail::NodeAccess;
48
49 template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
50 template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
51};
52
53} // end namespace ilist_detail
54
55/// Iterator for intrusive lists based on ilist_node.
56template <class OptionsT, bool IsReverse, bool IsConst>
57class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
58 friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
59 friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
60 friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
61
62 using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
63 using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
64
65public:
66 using value_type = typename Traits::value_type;
67 using pointer = typename Traits::pointer;
68 using reference = typename Traits::reference;
69 using difference_type = ptrdiff_t;
70 using iterator_category = std::bidirectional_iterator_tag;
71 using const_pointer = typename OptionsT::const_pointer;
72 using const_reference = typename OptionsT::const_reference;
73
74private:
75 using node_pointer = typename Traits::node_pointer;
76 using node_reference = typename Traits::node_reference;
77
78 node_pointer NodePtr = nullptr;
79
80public:
81 /// Create from an ilist_node.
82 explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
83
84 explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
85 explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
86 ilist_iterator() = default;
87
88 // This is templated so that we can allow constructing a const iterator from
89 // a nonconst iterator...
90 template <bool RHSIsConst>
91 ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
92 std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr)
93 : NodePtr(RHS.NodePtr) {}
94
95 // This is templated so that we can allow assigning to a const iterator from
96 // a nonconst iterator...
97 template <bool RHSIsConst>
98 std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &>
99 operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
100 NodePtr = RHS.NodePtr;
101 return *this;
102 }
103
104 /// Explicit conversion between forward/reverse iterators.
105 ///
106 /// Translate between forward and reverse iterators without changing range
107 /// boundaries. The resulting iterator will dereference (and have a handle)
108 /// to the previous node, which is somewhat unexpected; but converting the
109 /// two endpoints in a range will give the same range in reverse.
110 ///
111 /// This matches std::reverse_iterator conversions.
112 explicit ilist_iterator(
113 const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
114 : ilist_iterator(++RHS.getReverse()) {}
115
116 /// Get a reverse iterator to the same node.
117 ///
118 /// Gives a reverse iterator that will dereference (and have a handle) to the
119 /// same node. Converting the endpoint iterators in a range will give a
120 /// different range; for range operations, use the explicit conversions.
121 ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
122 if (NodePtr)
123 return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
124 return ilist_iterator<OptionsT, !IsReverse, IsConst>();
125 }
126
127 /// Const-cast.
128 ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
129 if (NodePtr)
130 return ilist_iterator<OptionsT, IsReverse, false>(
131 const_cast<typename ilist_iterator<OptionsT, IsReverse,
132 false>::node_reference>(*NodePtr));
133 return ilist_iterator<OptionsT, IsReverse, false>();
134 }
135
136 // Accessors...
137 reference operator*() const {
138    assert(!NodePtr->isKnownSentinel());
139 return *Access::getValuePtr(NodePtr);
140 }
141 pointer operator->() const { return &operator*(); }
142
143 // Comparison operators
144 friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
145 return LHS.NodePtr == RHS.NodePtr;
146 }
147 friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
148 return LHS.NodePtr != RHS.NodePtr;
12. Assuming 'LHS.NodePtr' is not equal to 'RHS.NodePtr'
13. Returning the value 1, which participates in a condition later
149 }
150
151 // Increment and decrement operators...
152 ilist_iterator &operator--() {
153 NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
154 return *this;
155 }
156 ilist_iterator &operator++() {
157 NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
158 return *this;
159 }
160 ilist_iterator operator--(int) {
161 ilist_iterator tmp = *this;
162 --*this;
163 return tmp;
164 }
165 ilist_iterator operator++(int) {
166 ilist_iterator tmp = *this;
167 ++*this;
168 return tmp;
169 }
170
171 /// Get the underlying ilist_node.
172 node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
173
174 /// Check for end. Only valid if ilist_sentinel_tracking<true>.
175 bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
176};
177
178template <typename From> struct simplify_type;
179
180/// Allow ilist_iterators to convert into pointers to a node automatically when
181/// used by the dyn_cast, cast, isa mechanisms...
182///
183/// FIXME: remove this, since there is no implicit conversion to NodeTy.
184template <class OptionsT, bool IsConst>
185struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
186 using iterator = ilist_iterator<OptionsT, false, IsConst>;
187 using SimpleType = typename iterator::pointer;
188
189 static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
190};
191template <class OptionsT, bool IsConst>
192struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
193 : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
194
195} // end namespace llvm
196
197#endif // LLVM_ADT_ILIST_ITERATOR_H
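
A brief sketch of the two reverse conversions, using llvm::simple_ilist and an illustrative Node type that is not part of this header: getReverse() keeps a handle to the same node, while the explicit constructor shifts by one so that converting both endpoints of a range yields the same range in reverse, as std::reverse_iterator does.

#include "llvm/ADT/simple_ilist.h"

struct Node : llvm::ilist_node<Node> { int V; explicit Node(int V) : V(V) {} };

void ilistIteratorSketch() {
  Node A(1), B(2), C(3);
  llvm::simple_ilist<Node> List;
  List.push_back(A); List.push_back(B); List.push_back(C);

  auto Fwd = List.begin();                              // dereferences A
  auto SameNode = Fwd.getReverse();                     // still dereferences A
  llvm::simple_ilist<Node>::reverse_iterator End(Fwd);  // explicit conversion: equals List.rend()
  (void)SameNode; (void)End;
  List.clear();                                         // unlink before the nodes go out of scope
}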

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/Support/Casting.h

1//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
10// and dyn_cast_or_null<X>() templates.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_CASTING_H
15#define LLVM_SUPPORT_CASTING_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/type_traits.h"
19#include <cassert>
20#include <memory>
21#include <type_traits>
22
23namespace llvm {
24
25//===----------------------------------------------------------------------===//
26// isa<x> Support Templates
27//===----------------------------------------------------------------------===//
28
29// Define a template that can be specialized by smart pointers to reflect the
30// fact that they are automatically dereferenced, and are not involved with the
31// template selection process... the default implementation is a noop.
32//
33template<typename From> struct simplify_type {
34 using SimpleType = From; // The real type this represents...
35
36 // An accessor to get the real value...
37 static SimpleType &getSimplifiedValue(From &Val) { return Val; }
38};
39
40template<typename From> struct simplify_type<const From> {
41 using NonConstSimpleType = typename simplify_type<From>::SimpleType;
42 using SimpleType =
43 typename add_const_past_pointer<NonConstSimpleType>::type;
44 using RetType =
45 typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
46
47 static RetType getSimplifiedValue(const From& Val) {
48 return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
49 }
50};
51
52// The core of the implementation of isa<X> is here; To and From should be
53// the names of classes. This template can be specialized to customize the
54// implementation of isa<> without rewriting it from scratch.
55template <typename To, typename From, typename Enabler = void>
56struct isa_impl {
57 static inline bool doit(const From &Val) {
58 return To::classof(&Val);
59 }
60};
61
62/// Always allow upcasts, and perform no dynamic check for them.
63template <typename To, typename From>
64struct isa_impl<To, From, std::enable_if_t<std::is_base_of<To, From>::value>> {
65 static inline bool doit(const From &) { return true; }
66};
67
68template <typename To, typename From> struct isa_impl_cl {
69 static inline bool doit(const From &Val) {
70 return isa_impl<To, From>::doit(Val);
71 }
72};
73
74template <typename To, typename From> struct isa_impl_cl<To, const From> {
75 static inline bool doit(const From &Val) {
76 return isa_impl<To, From>::doit(Val);
77 }
78};
79
80template <typename To, typename From>
81struct isa_impl_cl<To, const std::unique_ptr<From>> {
82 static inline bool doit(const std::unique_ptr<From> &Val) {
83    assert(Val && "isa<> used on a null pointer");
84 return isa_impl_cl<To, From>::doit(*Val);
85 }
86};
87
88template <typename To, typename From> struct isa_impl_cl<To, From*> {
89 static inline bool doit(const From *Val) {
90    assert(Val && "isa<> used on a null pointer");
91 return isa_impl<To, From>::doit(*Val);
92 }
93};
94
95template <typename To, typename From> struct isa_impl_cl<To, From*const> {
96 static inline bool doit(const From *Val) {
97    assert(Val && "isa<> used on a null pointer");
98 return isa_impl<To, From>::doit(*Val);
99 }
100};
101
102template <typename To, typename From> struct isa_impl_cl<To, const From*> {
103 static inline bool doit(const From *Val) {
104    assert(Val && "isa<> used on a null pointer");
105 return isa_impl<To, From>::doit(*Val);
106 }
107};
108
109template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
110 static inline bool doit(const From *Val) {
111    assert(Val && "isa<> used on a null pointer");
112 return isa_impl<To, From>::doit(*Val);
113 }
114};
115
116template<typename To, typename From, typename SimpleFrom>
117struct isa_impl_wrap {
118 // When From != SimplifiedType, we can simplify the type some more by using
119 // the simplify_type template.
120 static bool doit(const From &Val) {
121 return isa_impl_wrap<To, SimpleFrom,
122 typename simplify_type<SimpleFrom>::SimpleType>::doit(
123 simplify_type<const From>::getSimplifiedValue(Val));
124 }
125};
126
127template<typename To, typename FromTy>
128struct isa_impl_wrap<To, FromTy, FromTy> {
129 // When From == SimpleType, we are as simple as we are going to get.
130 static bool doit(const FromTy &Val) {
131 return isa_impl_cl<To,FromTy>::doit(Val);
132 }
133};
134
135// isa<X> - Return true if the parameter to the template is an instance of the
136// template type argument. Used like this:
137//
138// if (isa<Type>(myVal)) { ... }
139//
140template <class X, class Y> LLVM_NODISCARD inline bool isa(const Y &Val) {
141 return isa_impl_wrap<X, const Y,
142 typename simplify_type<const Y>::SimpleType>::doit(Val);
143}
144
145// isa_and_nonnull<X> - Functionally identical to isa, except that a null value
146// is accepted.
147//
148template <class X, class Y>
149LLVM_NODISCARD inline bool isa_and_nonnull(const Y &Val) {
150 if (!Val)
151 return false;
152 return isa<X>(Val);
153}
154
155//===----------------------------------------------------------------------===//
156// cast<x> Support Templates
157//===----------------------------------------------------------------------===//
158
159template<class To, class From> struct cast_retty;
160
161// Calculate what type the 'cast' function should return, based on a requested
162// type of To and a source type of From.
163template<class To, class From> struct cast_retty_impl {
164 using ret_type = To &; // Normal case, return Ty&
165};
166template<class To, class From> struct cast_retty_impl<To, const From> {
167 using ret_type = const To &; // Normal case, return Ty&
168};
169
170template<class To, class From> struct cast_retty_impl<To, From*> {
171 using ret_type = To *; // Pointer arg case, return Ty*
172};
173
174template<class To, class From> struct cast_retty_impl<To, const From*> {
175 using ret_type = const To *; // Constant pointer arg case, return const Ty*
176};
177
178template<class To, class From> struct cast_retty_impl<To, const From*const> {
179 using ret_type = const To *; // Constant pointer arg case, return const Ty*
180};
181
182template <class To, class From>
183struct cast_retty_impl<To, std::unique_ptr<From>> {
184private:
185 using PointerType = typename cast_retty_impl<To, From *>::ret_type;
186 using ResultType = std::remove_pointer_t<PointerType>;
187
188public:
189 using ret_type = std::unique_ptr<ResultType>;
190};
191
192template<class To, class From, class SimpleFrom>
193struct cast_retty_wrap {
194 // When the simplified type and the from type are not the same, use the type
195 // simplifier to reduce the type, then reuse cast_retty_impl to get the
196 // resultant type.
197 using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
198};
199
200template<class To, class FromTy>
201struct cast_retty_wrap<To, FromTy, FromTy> {
202 // When the simplified type is equal to the from type, use it directly.
203 using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
204};
205
206template<class To, class From>
207struct cast_retty {
208 using ret_type = typename cast_retty_wrap<
209 To, From, typename simplify_type<From>::SimpleType>::ret_type;
210};
211
212// Ensure the non-simple values are converted using the simplify_type template
213// that may be specialized by smart pointers...
214//
215template<class To, class From, class SimpleFrom> struct cast_convert_val {
216 // This is not a simple type, use the template to simplify it...
217 static typename cast_retty<To, From>::ret_type doit(From &Val) {
218 return cast_convert_val<To, SimpleFrom,
219 typename simplify_type<SimpleFrom>::SimpleType>::doit(
220 simplify_type<From>::getSimplifiedValue(Val));
221 }
222};
223
224template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
225 // This _is_ a simple type, just cast it.
226 static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
227 typename cast_retty<To, FromTy>::ret_type Res2
228 = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
229 return Res2;
230 }
231};
232
233template <class X> struct is_simple_type {
234 static const bool value =
235 std::is_same<X, typename simplify_type<X>::SimpleType>::value;
236};
237
238// cast<X> - Return the argument parameter cast to the specified type. This
239// casting operator asserts that the type is correct, so it does not return null
240// on failure. It does not allow a null argument (use cast_or_null for that).
241// It is typically used like this:
242//
243// cast<Instruction>(myVal)->getParent()
244//
245template <class X, class Y>
246inline std::enable_if_t<!is_simple_type<Y>::value,
247 typename cast_retty<X, const Y>::ret_type>
248cast(const Y &Val) {
249  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
250 return cast_convert_val<
251 X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
252}
253
254template <class X, class Y>
255inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
256  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
257 return cast_convert_val<X, Y,
258 typename simplify_type<Y>::SimpleType>::doit(Val);
259}
260
261template <class X, class Y>
262inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
263  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
264 return cast_convert_val<X, Y*,
265 typename simplify_type<Y*>::SimpleType>::doit(Val);
266}
267
268template <class X, class Y>
269inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
270cast(std::unique_ptr<Y> &&Val) {
271  assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!");
272 using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
273 return ret_type(
274 cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit(
275 Val.release()));
276}
277
278// cast_or_null<X> - Functionally identical to cast, except that a null value is
279// accepted.
280//
281template <class X, class Y>
282LLVM_NODISCARD inline std::enable_if_t<
283 !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>
284cast_or_null(const Y &Val) {
285 if (!Val)
286 return nullptr;
287  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
288 return cast<X>(Val);
289}
290
291template <class X, class Y>
292LLVM_NODISCARD inline std::enable_if_t<!is_simple_type<Y>::value,
293 typename cast_retty<X, Y>::ret_type>
294cast_or_null(Y &Val) {
295 if (!Val)
296 return nullptr;
297  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
298 return cast<X>(Val);
299}
300
301template <class X, class Y>
302LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
303cast_or_null(Y *Val) {
304 if (!Val) return nullptr;
305  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
306 return cast<X>(Val);
307}
308
309template <class X, class Y>
310inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
311cast_or_null(std::unique_ptr<Y> &&Val) {
312 if (!Val)
313 return nullptr;
314 return cast<X>(std::move(Val));
315}
316
317// dyn_cast<X> - Return the argument parameter cast to the specified type. This
318// casting operator returns null if the argument is of the wrong type, so it can
319// be used to test for a type as well as cast if successful. This should be
320// used in the context of an if statement like this:
321//
322// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
323//
324
325template <class X, class Y>
326LLVM_NODISCARD inline std::enable_if_t<
327 !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>
328dyn_cast(const Y &Val) {
329 return isa<X>(Val) ? cast<X>(Val) : nullptr;
330}
331
332template <class X, class Y>
333LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
334 return isa<X>(Val) ? cast<X>(Val) : nullptr;
26. Assuming 'Val' is a 'CallInst'
27. '?' condition is true
28. Returning pointer, which participates in a condition later
335}
336
337template <class X, class Y>
338LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
339 return isa<X>(Val) ? cast<X>(Val) : nullptr;
340}
341
342// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
343// value is accepted.
344//
345template <class X, class Y>
346LLVM_NODISCARD inline std::enable_if_t<
347 !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>
348dyn_cast_or_null(const Y &Val) {
349 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
350}
351
352template <class X, class Y>
353LLVM_NODISCARD inline std::enable_if_t<!is_simple_type<Y>::value,
354 typename cast_retty<X, Y>::ret_type>
355dyn_cast_or_null(Y &Val) {
356 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
357}
358
359template <class X, class Y>
360LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
361dyn_cast_or_null(Y *Val) {
362 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
363}
364
365// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
366// taking ownership of the input pointer iff isa<X>(Val) is true. If the
367// cast is successful, From refers to nullptr on exit and the casted value
368// is returned. If the cast is unsuccessful, the function returns nullptr
369// and From is unchanged.
370template <class X, class Y>
371LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
372 -> decltype(cast<X>(Val)) {
373 if (!isa<X>(Val))
374 return nullptr;
375 return cast<X>(std::move(Val));
376}
377
378template <class X, class Y>
379LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val) {
380 return unique_dyn_cast<X, Y>(Val);
381}
382
383// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast, except that
384// a null value is accepted.
385template <class X, class Y>
386LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
387 -> decltype(cast<X>(Val)) {
388 if (!Val)
389 return nullptr;
390 return unique_dyn_cast<X, Y>(Val);
391}
392
393template <class X, class Y>
394LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val) {
395 return unique_dyn_cast_or_null<X, Y>(Val);
396}
397
398} // end namespace llvm
399
400#endif // LLVM_SUPPORT_CASTING_H
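
A minimal sketch of how the templates above are typically used on IR values, assuming a Value *V obtained elsewhere; castingSketch is an illustrative name:

#include "llvm/IR/Instructions.h"
using namespace llvm;

void castingSketch(Value *V) {
  if (isa<CallInst>(V)) {
    // V is known to be a CallInst here, but has not been converted.
  }
  if (auto *CI = dyn_cast<CallInst>(V))          // null if V is not a CallInst
    (void)CI;
  if (auto *CI = dyn_cast_or_null<CallInst>(V))  // additionally tolerates V == nullptr
    (void)CI;
  auto *I = cast<Instruction>(V);                // asserts on mismatch; never returns null
  (void)I;
}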

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/CallingConv.h"
29#include "llvm/IR/Constant.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/InstrTypes.h"
33#include "llvm/IR/Instruction.h"
34#include "llvm/IR/OperandTraits.h"
35#include "llvm/IR/Type.h"
36#include "llvm/IR/Use.h"
37#include "llvm/IR/User.h"
38#include "llvm/IR/Value.h"
39#include "llvm/Support/AtomicOrdering.h"
40#include "llvm/Support/Casting.h"
41#include "llvm/Support/ErrorHandling.h"
42#include <cassert>
43#include <cstddef>
44#include <cstdint>
45#include <iterator>
46
47namespace llvm {
48
49class APInt;
50class ConstantInt;
51class DataLayout;
52class LLVMContext;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
58/// an instruction to allocate memory on the stack
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62protected:
63 // Note: Instruction needs to be a friend here to call cloneImpl.
64 friend class Instruction;
65
66 AllocaInst *cloneImpl() const;
67
68public:
69 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
70 Value *ArraySize = nullptr,
71 const Twine &Name = "",
72 Instruction *InsertBefore = nullptr);
73 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
74 const Twine &Name, BasicBlock *InsertAtEnd);
75
76 AllocaInst(Type *Ty, unsigned AddrSpace,
77 const Twine &Name, Instruction *InsertBefore = nullptr);
78 AllocaInst(Type *Ty, unsigned AddrSpace,
79 const Twine &Name, BasicBlock *InsertAtEnd);
80
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, MaybeAlign Align,
82 const Twine &Name = "", Instruction *InsertBefore = nullptr);
83 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, MaybeAlign Align,
84 const Twine &Name, BasicBlock *InsertAtEnd);
85
86 /// Return true if there is an allocation size parameter to the allocation
87 /// instruction that is not 1.
88 bool isArrayAllocation() const;
89
90 /// Get the number of elements allocated. For a simple allocation of a single
91 /// element, this will return a constant 1 value.
92 const Value *getArraySize() const { return getOperand(0); }
93 Value *getArraySize() { return getOperand(0); }
94
95 /// Overload to return most specific pointer type.
96 PointerType *getType() const {
97 return cast<PointerType>(Instruction::getType());
98 }
99
100 /// Get allocation size in bits. Returns None if size can't be determined,
101 /// e.g. in case of a VLA.
102 Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;
103
104 /// Return the type that is being allocated by the instruction.
105 Type *getAllocatedType() const { return AllocatedType; }
106 /// for use only in special circumstances that need to generically
107 /// transform a whole instruction (eg: IR linking and vectorization).
108 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
109
110 /// Return the alignment of the memory that is being allocated by the
111 /// instruction.
112 MaybeAlign getAlign() const {
113 return decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
114 }
115  // FIXME: Remove this once the transition to Align is over.
116 unsigned getAlignment() const {
117 if (const auto MA = getAlign())
118 return MA->value();
119 return 0;
120 }
121 void setAlignment(MaybeAlign Align);
122
123 /// Return true if this alloca is in the entry block of the function and is a
124 /// constant size. If so, the code generator will fold it into the
125 /// prolog/epilog code, so it is basically free.
126 bool isStaticAlloca() const;
127
128 /// Return true if this alloca is used as an inalloca argument to a call. Such
129 /// allocas are never considered static even if they are in the entry block.
130 bool isUsedWithInAlloca() const {
131 return getSubclassDataFromInstruction() & 32;
132 }
133
134 /// Specify whether this alloca is used to represent the arguments to a call.
135 void setUsedWithInAlloca(bool V) {
136 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
137 (V ? 32 : 0));
138 }
139
140 /// Return true if this alloca is used as a swifterror argument to a call.
141 bool isSwiftError() const {
142 return getSubclassDataFromInstruction() & 64;
143 }
144
145 /// Specify whether this alloca is used to represent a swifterror.
146 void setSwiftError(bool V) {
147 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
148 (V ? 64 : 0));
149 }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 void setInstructionSubclassData(unsigned short D) {
163 Instruction::setInstructionSubclassData(D);
164 }
165};
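
A short sketch of the constructors and accessors above, assuming an LLVMContext and a BasicBlock to append to; allocaSketch is an illustrative name:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void allocaSketch(LLVMContext &Ctx, BasicBlock *Entry) {
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *A = new AllocaInst(I32, /*AddrSpace=*/0, "x", Entry);  // %x = alloca i32
  bool IsArray = A->isArrayAllocation();   // false: the array size defaults to 1
  Type *Ty = A->getAllocatedType();        // i32
  (void)IsArray; (void)Ty;
}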
166
167//===----------------------------------------------------------------------===//
168// LoadInst Class
169//===----------------------------------------------------------------------===//
170
171/// An instruction for reading from memory. This uses the SubclassData field in
172/// Value to store whether or not the load is volatile.
173class LoadInst : public UnaryInstruction {
174 void AssertOK();
175
176protected:
177 // Note: Instruction needs to be a friend here to call cloneImpl.
178 friend class Instruction;
179
180 LoadInst *cloneImpl() const;
181
182public:
183 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
184 Instruction *InsertBefore = nullptr);
185 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
186 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
187 Instruction *InsertBefore = nullptr);
188 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
189 BasicBlock *InsertAtEnd);
190 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
191 MaybeAlign Align, Instruction *InsertBefore = nullptr);
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
193 MaybeAlign Align, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 MaybeAlign Align, AtomicOrdering Order,
196 SyncScope::ID SSID = SyncScope::System,
197 Instruction *InsertBefore = nullptr);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
200 BasicBlock *InsertAtEnd);
201
202 // Deprecated [opaque pointer types]
203 explicit LoadInst(Value *Ptr, const Twine &NameStr = "",
204 Instruction *InsertBefore = nullptr)
205 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
206 InsertBefore) {}
207 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd)
208 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
209 InsertAtEnd) {}
210 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
211 Instruction *InsertBefore = nullptr)
212 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
213 isVolatile, InsertBefore) {}
214 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
215 BasicBlock *InsertAtEnd)
216 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
217 isVolatile, InsertAtEnd) {}
218 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
219 Instruction *InsertBefore = nullptr)
220 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
221 isVolatile, Align, InsertBefore) {}
222 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
223 BasicBlock *InsertAtEnd)
224 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
225 isVolatile, Align, InsertAtEnd) {}
226 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
227 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
228 Instruction *InsertBefore = nullptr)
229 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
230 isVolatile, Align, Order, SSID, InsertBefore) {}
231 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
232 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
233 : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
234 isVolatile, Align, Order, SSID, InsertAtEnd) {}
235
236 /// Return true if this is a load from a volatile memory location.
237 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
238
239 /// Specify whether this is a volatile load or not.
240 void setVolatile(bool V) {
241 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
242 (V ? 1 : 0));
243 }
244
245 /// Return the alignment of the access that is being performed.
246 /// FIXME: Remove this function once transition to Align is over.
247 /// Use getAlign() instead.
248 unsigned getAlignment() const {
249 if (const auto MA = getAlign())
250 return MA->value();
251 return 0;
252 }
253
254 /// Return the alignment of the access that is being performed.
255 MaybeAlign getAlign() const {
256 return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
257 }
258
259 void setAlignment(MaybeAlign Alignment);
260
261 /// Returns the ordering constraint of this load instruction.
262 AtomicOrdering getOrdering() const {
263 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
264 }
265
266 /// Sets the ordering constraint of this load instruction. May not be Release
267 /// or AcquireRelease.
268 void setOrdering(AtomicOrdering Ordering) {
269 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
270 ((unsigned)Ordering << 7));
271 }
272
273 /// Returns the synchronization scope ID of this load instruction.
274 SyncScope::ID getSyncScopeID() const {
275 return SSID;
276 }
277
278 /// Sets the synchronization scope ID of this load instruction.
279 void setSyncScopeID(SyncScope::ID SSID) {
280 this->SSID = SSID;
281 }
282
283 /// Sets the ordering constraint and the synchronization scope ID of this load
284 /// instruction.
285 void setAtomic(AtomicOrdering Ordering,
286 SyncScope::ID SSID = SyncScope::System) {
287 setOrdering(Ordering);
288 setSyncScopeID(SSID);
289 }
290
291 bool isSimple() const { return !isAtomic() && !isVolatile(); }
292
293 bool isUnordered() const {
294 return (getOrdering() == AtomicOrdering::NotAtomic ||
295 getOrdering() == AtomicOrdering::Unordered) &&
296 !isVolatile();
297 }
298
299 Value *getPointerOperand() { return getOperand(0); }
300 const Value *getPointerOperand() const { return getOperand(0); }
301 static unsigned getPointerOperandIndex() { return 0U; }
302 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
303
304 /// Returns the address space of the pointer operand.
305 unsigned getPointerAddressSpace() const {
306 return getPointerOperandType()->getPointerAddressSpace();
307 }
308
309 // Methods for support type inquiry through isa, cast, and dyn_cast:
310 static bool classof(const Instruction *I) {
311 return I->getOpcode() == Instruction::Load;
312 }
313 static bool classof(const Value *V) {
314 return isa<Instruction>(V) && classof(cast<Instruction>(V));
315 }
316
317private:
318 // Shadow Instruction::setInstructionSubclassData with a private forwarding
319 // method so that subclasses cannot accidentally use it.
320 void setInstructionSubclassData(unsigned short D) {
321 Instruction::setInstructionSubclassData(D);
322 }
323
324 /// The synchronization scope ID of this load instruction. Not quite enough
325 /// room in SubClassData for everything, so synchronization scope ID gets its
326 /// own field.
327 SyncScope::ID SSID;
328};
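// --- Usage sketch (editor's addition, not part of Instructions.h) ---
// Constructing an atomic acquire load with the long constructor declared
// above. Int32Ty, Ptr and InsertPt are assumed to be a pre-existing
// IntegerType*, pointer-typed Value* and insertion-point Instruction*.
//
//   LoadInst *LI = new LoadInst(Int32Ty, Ptr, "x.acq", /*isVolatile=*/false,
//                               MaybeAlign(4), AtomicOrdering::Acquire,
//                               SyncScope::System, InsertPt);
//
// The volatile bit, alignment, ordering and sync scope can also be changed
// afterwards with setVolatile/setAlignment/setAtomic above; after this call
// LI->isSimple() and LI->isUnordered() both return false.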
329
330//===----------------------------------------------------------------------===//
331// StoreInst Class
332//===----------------------------------------------------------------------===//
333
334/// An instruction for storing to memory.
335class StoreInst : public Instruction {
336 void AssertOK();
337
338protected:
339 // Note: Instruction needs to be a friend here to call cloneImpl.
340 friend class Instruction;
341
342 StoreInst *cloneImpl() const;
343
344public:
345 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
346 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
347 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
348 Instruction *InsertBefore = nullptr);
349 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
350 StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
351 Instruction *InsertBefore = nullptr);
352 StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
353 BasicBlock *InsertAtEnd);
354 StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
355 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
356 Instruction *InsertBefore = nullptr);
357 StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
358 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
359
360 // allocate space for exactly two operands
361 void *operator new(size_t s) {
362 return User::operator new(s, 2);
363 }
364
365 /// Return true if this is a store to a volatile memory location.
366 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
367
368 /// Specify whether this is a volatile store or not.
369 void setVolatile(bool V) {
370 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
371 (V ? 1 : 0));
372 }
373
374 /// Transparently provide more efficient getOperand methods.
375 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
376
377 /// Return the alignment of the access that is being performed
378 /// FIXME: Remove this function once transition to Align is over.
379 /// Use getAlign() instead.
380 unsigned getAlignment() const {
381 if (const auto MA = getAlign())
382 return MA->value();
383 return 0;
384 }
385
386 MaybeAlign getAlign() const {
387 return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
388 }
389
390 void setAlignment(MaybeAlign Alignment);
391
392 /// Returns the ordering constraint of this store instruction.
393 AtomicOrdering getOrdering() const {
394 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
395 }
396
397 /// Sets the ordering constraint of this store instruction. May not be
398 /// Acquire or AcquireRelease.
399 void setOrdering(AtomicOrdering Ordering) {
400 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
401 ((unsigned)Ordering << 7));
402 }
403
404 /// Returns the synchronization scope ID of this store instruction.
405 SyncScope::ID getSyncScopeID() const {
406 return SSID;
407 }
408
409 /// Sets the synchronization scope ID of this store instruction.
410 void setSyncScopeID(SyncScope::ID SSID) {
411 this->SSID = SSID;
412 }
413
414 /// Sets the ordering constraint and the synchronization scope ID of this
415 /// store instruction.
416 void setAtomic(AtomicOrdering Ordering,
417 SyncScope::ID SSID = SyncScope::System) {
418 setOrdering(Ordering);
419 setSyncScopeID(SSID);
420 }
421
422 bool isSimple() const { return !isAtomic() && !isVolatile(); }
423
424 bool isUnordered() const {
425 return (getOrdering() == AtomicOrdering::NotAtomic ||
426 getOrdering() == AtomicOrdering::Unordered) &&
427 !isVolatile();
428 }
429
430 Value *getValueOperand() { return getOperand(0); }
431 const Value *getValueOperand() const { return getOperand(0); }
432
433 Value *getPointerOperand() { return getOperand(1); }
434 const Value *getPointerOperand() const { return getOperand(1); }
435 static unsigned getPointerOperandIndex() { return 1U; }
436 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
437
438 /// Returns the address space of the pointer operand.
439 unsigned getPointerAddressSpace() const {
440 return getPointerOperandType()->getPointerAddressSpace();
441 }
442
443 // Methods for support type inquiry through isa, cast, and dyn_cast:
444 static bool classof(const Instruction *I) {
445 return I->getOpcode() == Instruction::Store;
446 }
447 static bool classof(const Value *V) {
448 return isa<Instruction>(V) && classof(cast<Instruction>(V));
449 }
450
451private:
452 // Shadow Instruction::setInstructionSubclassData with a private forwarding
453 // method so that subclasses cannot accidentally use it.
454 void setInstructionSubclassData(unsigned short D) {
455 Instruction::setInstructionSubclassData(D);
456 }
457
458 /// The synchronization scope ID of this store instruction. Not quite enough
459 /// room in SubClassData for everything, so synchronization scope ID gets its
460 /// own field.
461 SyncScope::ID SSID;
462};
463
464template <>
465struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
466};
467
468DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
469
470//===----------------------------------------------------------------------===//
471// FenceInst Class
472//===----------------------------------------------------------------------===//
473
474/// An instruction for ordering other memory operations.
475class FenceInst : public Instruction {
476 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
477
478protected:
479 // Note: Instruction needs to be a friend here to call cloneImpl.
480 friend class Instruction;
481
482 FenceInst *cloneImpl() const;
483
484public:
485 // Ordering may only be Acquire, Release, AcquireRelease, or
486 // SequentiallyConsistent.
487 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
488 SyncScope::ID SSID = SyncScope::System,
489 Instruction *InsertBefore = nullptr);
490 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
491 BasicBlock *InsertAtEnd);
492
493 // allocate space for exactly zero operands
494 void *operator new(size_t s) {
495 return User::operator new(s, 0);
496 }
497
498 /// Returns the ordering constraint of this fence instruction.
499 AtomicOrdering getOrdering() const {
500 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
501 }
502
503 /// Sets the ordering constraint of this fence instruction. May only be
504 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
505 void setOrdering(AtomicOrdering Ordering) {
506 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
507 ((unsigned)Ordering << 1));
508 }
509
510 /// Returns the synchronization scope ID of this fence instruction.
511 SyncScope::ID getSyncScopeID() const {
512 return SSID;
513 }
514
515 /// Sets the synchronization scope ID of this fence instruction.
516 void setSyncScopeID(SyncScope::ID SSID) {
517 this->SSID = SSID;
518 }
519
520 // Methods for support type inquiry through isa, cast, and dyn_cast:
521 static bool classof(const Instruction *I) {
522 return I->getOpcode() == Instruction::Fence;
523 }
524 static bool classof(const Value *V) {
525 return isa<Instruction>(V) && classof(cast<Instruction>(V));
526 }
527
528private:
529 // Shadow Instruction::setInstructionSubclassData with a private forwarding
530 // method so that subclasses cannot accidentally use it.
531 void setInstructionSubclassData(unsigned short D) {
532 Instruction::setInstructionSubclassData(D);
533 }
534
535 /// The synchronization scope ID of this fence instruction. Not quite enough
536 /// room in SubClassData for everything, so synchronization scope ID gets its
537 /// own field.
538 SyncScope::ID SSID;
539};
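// --- Usage sketch (editor's addition, not part of Instructions.h) ---
// A fence carries no operands; it only encodes an ordering and a sync scope.
// Ctx and InsertPt are assumed to be an existing LLVMContext and Instruction*.
//
//   FenceInst *FI = new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
//                                 SyncScope::System, InsertPt);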
540
541//===----------------------------------------------------------------------===//
542// AtomicCmpXchgInst Class
543//===----------------------------------------------------------------------===//
544
545/// An instruction that atomically checks whether a
546/// specified value is in a memory location, and, if it is, stores a new value
547/// there. The value returned by this instruction is a pair containing the
548/// original value as first element, and an i1 indicating success (true) or
549/// failure (false) as second element.
550///
551class AtomicCmpXchgInst : public Instruction {
552 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
553 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
554 SyncScope::ID SSID);
555
556protected:
557 // Note: Instruction needs to be a friend here to call cloneImpl.
558 friend class Instruction;
559
560 AtomicCmpXchgInst *cloneImpl() const;
561
562public:
563 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
564 AtomicOrdering SuccessOrdering,
565 AtomicOrdering FailureOrdering,
566 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
567 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
568 AtomicOrdering SuccessOrdering,
569 AtomicOrdering FailureOrdering,
570 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
571
572 // allocate space for exactly three operands
573 void *operator new(size_t s) {
574 return User::operator new(s, 3);
575 }
576
577 /// Return true if this is a cmpxchg from a volatile memory
578 /// location.
579 ///
580 bool isVolatile() const {
581 return getSubclassDataFromInstruction() & 1;
582 }
583
584 /// Specify whether this is a volatile cmpxchg.
585 ///
586 void setVolatile(bool V) {
587 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
588 (unsigned)V);
589 }
590
591 /// Return true if this cmpxchg may spuriously fail.
592 bool isWeak() const {
593 return getSubclassDataFromInstruction() & 0x100;
594 }
595
596 void setWeak(bool IsWeak) {
597 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
598 (IsWeak << 8));
599 }
600
601 /// Transparently provide more efficient getOperand methods.
602 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
603
604 /// Returns the success ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getSuccessOrdering() const {
606 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
607 }
608
609 /// Sets the success ordering constraint of this cmpxchg instruction.
610 void setSuccessOrdering(AtomicOrdering Ordering) {
611 assert(Ordering != AtomicOrdering::NotAtomic &&
612 "CmpXchg instructions can only be atomic.");
613 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
614 ((unsigned)Ordering << 2));
615 }
616
617 /// Returns the failure ordering constraint of this cmpxchg instruction.
618 AtomicOrdering getFailureOrdering() const {
619 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
620 }
621
622 /// Sets the failure ordering constraint of this cmpxchg instruction.
623 void setFailureOrdering(AtomicOrdering Ordering) {
624 assert(Ordering != AtomicOrdering::NotAtomic &&
625 "CmpXchg instructions can only be atomic.");
626 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
627 ((unsigned)Ordering << 5));
628 }
629
630 /// Returns the synchronization scope ID of this cmpxchg instruction.
631 SyncScope::ID getSyncScopeID() const {
632 return SSID;
633 }
634
635 /// Sets the synchronization scope ID of this cmpxchg instruction.
636 void setSyncScopeID(SyncScope::ID SSID) {
637 this->SSID = SSID;
638 }
639
640 Value *getPointerOperand() { return getOperand(0); }
641 const Value *getPointerOperand() const { return getOperand(0); }
642 static unsigned getPointerOperandIndex() { return 0U; }
643
644 Value *getCompareOperand() { return getOperand(1); }
645 const Value *getCompareOperand() const { return getOperand(1); }
646
647 Value *getNewValOperand() { return getOperand(2); }
648 const Value *getNewValOperand() const { return getOperand(2); }
649
650 /// Returns the address space of the pointer operand.
651 unsigned getPointerAddressSpace() const {
652 return getPointerOperand()->getType()->getPointerAddressSpace();
653 }
654
655 /// Returns the strongest permitted ordering on failure, given the
656 /// desired ordering on success.
657 ///
658 /// If the comparison in a cmpxchg operation fails, there is no atomic store
659 /// so release semantics cannot be provided. So this function drops explicit
660 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
661 /// operation would remain SequentiallyConsistent.
662 static AtomicOrdering
663 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
664 switch (SuccessOrdering) {
665 default:
666 llvm_unreachable("invalid cmpxchg success ordering");
667 case AtomicOrdering::Release:
668 case AtomicOrdering::Monotonic:
669 return AtomicOrdering::Monotonic;
670 case AtomicOrdering::AcquireRelease:
671 case AtomicOrdering::Acquire:
672 return AtomicOrdering::Acquire;
673 case AtomicOrdering::SequentiallyConsistent:
674 return AtomicOrdering::SequentiallyConsistent;
675 }
676 }
677
678 // Methods for support type inquiry through isa, cast, and dyn_cast:
679 static bool classof(const Instruction *I) {
680 return I->getOpcode() == Instruction::AtomicCmpXchg;
681 }
682 static bool classof(const Value *V) {
683 return isa<Instruction>(V) && classof(cast<Instruction>(V));
684 }
685
686private:
687 // Shadow Instruction::setInstructionSubclassData with a private forwarding
688 // method so that subclasses cannot accidentally use it.
689 void setInstructionSubclassData(unsigned short D) {
690 Instruction::setInstructionSubclassData(D);
691 }
692
693 /// The synchronization scope ID of this cmpxchg instruction. Not quite
694 /// enough room in SubClassData for everything, so synchronization scope ID
695 /// gets its own field.
696 SyncScope::ID SSID;
697};
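// --- Usage sketch (editor's addition, not part of Instructions.h) ---
// Emitting a cmpxchg whose failure ordering is derived from the success
// ordering with the helper documented above. Ptr, Expected, Desired and
// InsertPt are assumed to exist.
//
//   AtomicOrdering Fail = AtomicCmpXchgInst::getStrongestFailureOrdering(
//       AtomicOrdering::Release);              // drops Release -> Monotonic
//   auto *CAS = new AtomicCmpXchgInst(Ptr, Expected, Desired,
//                                     AtomicOrdering::Release, Fail,
//                                     SyncScope::System, InsertPt);
//
// The result of CAS is the {original value, i1 success} pair described in
// the class comment; extract the pieces with ExtractValueInst if needed.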
698
699template <>
700struct OperandTraits<AtomicCmpXchgInst> :
701 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
702};
703
704DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
705
706//===----------------------------------------------------------------------===//
707// AtomicRMWInst Class
708//===----------------------------------------------------------------------===//
709
710/// an instruction that atomically reads a memory location,
711/// combines it with another value, and then stores the result back. Returns
712/// the old value.
713///
714class AtomicRMWInst : public Instruction {
715protected:
716 // Note: Instruction needs to be a friend here to call cloneImpl.
717 friend class Instruction;
718
719 AtomicRMWInst *cloneImpl() const;
720
721public:
722 /// This enumeration lists the possible modifications atomicrmw can make. In
723 /// the descriptions, 'p' is the pointer to the instruction's memory location,
724 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
725 /// instruction. These instructions always return 'old'.
726 enum BinOp {
727 /// *p = v
728 Xchg,
729 /// *p = old + v
730 Add,
731 /// *p = old - v
732 Sub,
733 /// *p = old & v
734 And,
735 /// *p = ~(old & v)
736 Nand,
737 /// *p = old | v
738 Or,
739 /// *p = old ^ v
740 Xor,
741 /// *p = old >signed v ? old : v
742 Max,
743 /// *p = old <signed v ? old : v
744 Min,
745 /// *p = old >unsigned v ? old : v
746 UMax,
747 /// *p = old <unsigned v ? old : v
748 UMin,
749
750 /// *p = old + v
751 FAdd,
752
753 /// *p = old - v
754 FSub,
755
756 FIRST_BINOP = Xchg,
757 LAST_BINOP = FSub,
758 BAD_BINOP
759 };
760
761 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
762 AtomicOrdering Ordering, SyncScope::ID SSID,
763 Instruction *InsertBefore = nullptr);
764 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
765 AtomicOrdering Ordering, SyncScope::ID SSID,
766 BasicBlock *InsertAtEnd);
767
768 // allocate space for exactly two operands
769 void *operator new(size_t s) {
770 return User::operator new(s, 2);
771 }
772
773 BinOp getOperation() const {
774 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
775 }
776
777 static StringRef getOperationName(BinOp Op);
778
779 static bool isFPOperation(BinOp Op) {
780 switch (Op) {
781 case AtomicRMWInst::FAdd:
782 case AtomicRMWInst::FSub:
783 return true;
784 default:
785 return false;
786 }
787 }
788
789 void setOperation(BinOp Operation) {
790 unsigned short SubclassData = getSubclassDataFromInstruction();
791 setInstructionSubclassData((SubclassData & 31) |
792 (Operation << 5));
793 }
794
795 /// Return true if this is a RMW on a volatile memory location.
796 ///
797 bool isVolatile() const {
798 return getSubclassDataFromInstruction() & 1;
799 }
800
801 /// Specify whether this is a volatile RMW or not.
802 ///
803 void setVolatile(bool V) {
804 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
805 (unsigned)V);
806 }
807
808 /// Transparently provide more efficient getOperand methods.
809 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
810
811 /// Returns the ordering constraint of this rmw instruction.
812 AtomicOrdering getOrdering() const {
813 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
814 }
815
816 /// Sets the ordering constraint of this rmw instruction.
817 void setOrdering(AtomicOrdering Ordering) {
818 assert(Ordering != AtomicOrdering::NotAtomic &&
819 "atomicrmw instructions can only be atomic.");
820 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
821 ((unsigned)Ordering << 2));
822 }
823
824 /// Returns the synchronization scope ID of this rmw instruction.
825 SyncScope::ID getSyncScopeID() const {
826 return SSID;
827 }
828
829 /// Sets the synchronization scope ID of this rmw instruction.
830 void setSyncScopeID(SyncScope::ID SSID) {
831 this->SSID = SSID;
832 }
833
834 Value *getPointerOperand() { return getOperand(0); }
835 const Value *getPointerOperand() const { return getOperand(0); }
836 static unsigned getPointerOperandIndex() { return 0U; }
837
838 Value *getValOperand() { return getOperand(1); }
839 const Value *getValOperand() const { return getOperand(1); }
840
841 /// Returns the address space of the pointer operand.
842 unsigned getPointerAddressSpace() const {
843 return getPointerOperand()->getType()->getPointerAddressSpace();
844 }
845
846 bool isFloatingPointOperation() const {
847 return isFPOperation(getOperation());
848 }
849
850 // Methods for support type inquiry through isa, cast, and dyn_cast:
851 static bool classof(const Instruction *I) {
852 return I->getOpcode() == Instruction::AtomicRMW;
853 }
854 static bool classof(const Value *V) {
855 return isa<Instruction>(V) && classof(cast<Instruction>(V));
856 }
857
858private:
859 void Init(BinOp Operation, Value *Ptr, Value *Val,
860 AtomicOrdering Ordering, SyncScope::ID SSID);
861
862 // Shadow Instruction::setInstructionSubclassData with a private forwarding
863 // method so that subclasses cannot accidentally use it.
864 void setInstructionSubclassData(unsigned short D) {
865 Instruction::setInstructionSubclassData(D);
866 }
867
868 /// The synchronization scope ID of this rmw instruction. Not quite enough
869 /// room in SubClassData for everything, so synchronization scope ID gets its
870 /// own field.
871 SyncScope::ID SSID;
872};
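// --- Usage sketch (editor's addition, not part of Instructions.h) ---
// An atomicrmw add that returns the value previously stored at Ptr. The
// BinOp enum above selects the combining operation; Ptr, Val and InsertPt
// are assumed to exist.
//
//   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val,
//                                 AtomicOrdering::Monotonic,
//                                 SyncScope::System, InsertPt);
//
// RMW itself is the 'old' value from the enum descriptions; for FAdd/FSub
// the same constructor is used and isFloatingPointOperation() returns true.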
873
874template <>
875struct OperandTraits<AtomicRMWInst>
876 : public FixedNumOperandTraits<AtomicRMWInst,2> {
877};
878
879DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
880
881//===----------------------------------------------------------------------===//
882// GetElementPtrInst Class
883//===----------------------------------------------------------------------===//
884
885// checkGEPType - Simple wrapper function to give a better assertion failure
886// message on bad indexes for a gep instruction.
887//
888inline Type *checkGEPType(Type *Ty) {
889 assert(Ty && "Invalid GetElementPtrInst indices for type!");
890 return Ty;
891}
892
893/// an instruction for type-safe pointer arithmetic to
894/// access elements of arrays and structs
895///
896class GetElementPtrInst : public Instruction {
897 Type *SourceElementType;
898 Type *ResultElementType;
899
900 GetElementPtrInst(const GetElementPtrInst &GEPI);
901
902 /// Constructors - Create a getelementptr instruction with a base pointer and a
903 /// list of indices. The first ctor can optionally insert before an existing
904 /// instruction, the second appends the new instruction to the specified
905 /// BasicBlock.
906 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
907 ArrayRef<Value *> IdxList, unsigned Values,
908 const Twine &NameStr, Instruction *InsertBefore);
909 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
910 ArrayRef<Value *> IdxList, unsigned Values,
911 const Twine &NameStr, BasicBlock *InsertAtEnd);
912
913 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
914
915protected:
916 // Note: Instruction needs to be a friend here to call cloneImpl.
917 friend class Instruction;
918
919 GetElementPtrInst *cloneImpl() const;
920
921public:
922 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
923 ArrayRef<Value *> IdxList,
924 const Twine &NameStr = "",
925 Instruction *InsertBefore = nullptr) {
926 unsigned Values = 1 + unsigned(IdxList.size());
927 if (!PointeeType)
928 PointeeType =
929 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
930 else
931 assert(
932 PointeeType ==
933 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
934 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
935 NameStr, InsertBefore);
936 }
937
938 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
939 ArrayRef<Value *> IdxList,
940 const Twine &NameStr,
941 BasicBlock *InsertAtEnd) {
942 unsigned Values = 1 + unsigned(IdxList.size());
943 if (!PointeeType)
944 PointeeType =
945 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
946 else
947 assert(
948 PointeeType ==
949 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
950 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
951 NameStr, InsertAtEnd);
952 }
953
954 /// Create an "inbounds" getelementptr. See the documentation for the
955 /// "inbounds" flag in LangRef.html for details.
956 static GetElementPtrInst *CreateInBounds(Value *Ptr,
957 ArrayRef<Value *> IdxList,
958 const Twine &NameStr = "",
959 Instruction *InsertBefore = nullptr){
960 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
961 }
962
963 static GetElementPtrInst *
964 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
965 const Twine &NameStr = "",
966 Instruction *InsertBefore = nullptr) {
967 GetElementPtrInst *GEP =
968 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
969 GEP->setIsInBounds(true);
970 return GEP;
971 }
972
973 static GetElementPtrInst *CreateInBounds(Value *Ptr,
974 ArrayRef<Value *> IdxList,
975 const Twine &NameStr,
976 BasicBlock *InsertAtEnd) {
977 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
978 }
979
980 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
981 ArrayRef<Value *> IdxList,
982 const Twine &NameStr,
983 BasicBlock *InsertAtEnd) {
984 GetElementPtrInst *GEP =
985 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
986 GEP->setIsInBounds(true);
987 return GEP;
988 }
989
990 /// Transparently provide more efficient getOperand methods.
991 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
992
993 Type *getSourceElementType() const { return SourceElementType; }
994
995 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
996 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
997
998 Type *getResultElementType() const {
999 assert(ResultElementType ==
1000 cast<PointerType>(getType()->getScalarType())->getElementType());
1001 return ResultElementType;
1002 }
1003
1004 /// Returns the address space of this instruction's pointer type.
1005 unsigned getAddressSpace() const {
1006 // Note that this is always the same as the pointer operand's address space
1007 // and that is cheaper to compute, so cheat here.
1008 return getPointerAddressSpace();
1009 }
1010
1011 /// Returns the type of the element that would be loaded with
1012 /// a load instruction with the specified parameters.
1013 ///
1014 /// Null is returned if the indices are invalid for the specified
1015 /// pointer type.
1016 ///
1017 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1018 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1019 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1020
1021 inline op_iterator idx_begin() { return op_begin()+1; }
1022 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1023 inline op_iterator idx_end() { return op_end(); }
1024 inline const_op_iterator idx_end() const { return op_end(); }
1025
1026 inline iterator_range<op_iterator> indices() {
1027 return make_range(idx_begin(), idx_end());
1028 }
1029
1030 inline iterator_range<const_op_iterator> indices() const {
1031 return make_range(idx_begin(), idx_end());
1032 }
1033
1034 Value *getPointerOperand() {
1035 return getOperand(0);
1036 }
1037 const Value *getPointerOperand() const {
1038 return getOperand(0);
1039 }
1040 static unsigned getPointerOperandIndex() {
1041 return 0U; // get index for modifying correct operand.
1042 }
1043
1044 /// Method to return the pointer operand as a
1045 /// PointerType.
1046 Type *getPointerOperandType() const {
1047 return getPointerOperand()->getType();
1048 }
1049
1050 /// Returns the address space of the pointer operand.
1051 unsigned getPointerAddressSpace() const {
1052 return getPointerOperandType()->getPointerAddressSpace();
1053 }
1054
1055 /// Returns the pointer type returned by the GEP
1056 /// instruction, which may be a vector of pointers.
1057 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1058 ArrayRef<Value *> IdxList) {
1059 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1060 Ptr->getType()->getPointerAddressSpace());
1061 // Vector GEP
1062 if (Ptr->getType()->isVectorTy()) {
1063 ElementCount EltCount = Ptr->getType()->getVectorElementCount();
1064 return VectorType::get(PtrTy, EltCount);
1065 }
1066 for (Value *Index : IdxList)
1067 if (Index->getType()->isVectorTy()) {
1068 ElementCount EltCount = Index->getType()->getVectorElementCount();
1069 return VectorType::get(PtrTy, EltCount);
1070 }
1071 // Scalar GEP
1072 return PtrTy;
1073 }
1074
1075 unsigned getNumIndices() const { // Note: always non-negative
1076 return getNumOperands() - 1;
1077 }
1078
1079 bool hasIndices() const {
1080 return getNumOperands() > 1;
1081 }
1082
1083 /// Return true if all of the indices of this GEP are
1084 /// zeros. If so, the result pointer and the first operand have the same
1085 /// value, just potentially different types.
1086 bool hasAllZeroIndices() const;
1087
1088 /// Return true if all of the indices of this GEP are
1089 /// constant integers. If so, the result pointer and the first operand have
1090 /// a constant offset between them.
1091 bool hasAllConstantIndices() const;
1092
1093 /// Set or clear the inbounds flag on this GEP instruction.
1094 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1095 void setIsInBounds(bool b = true);
1096
1097 /// Determine whether the GEP has the inbounds flag.
1098 bool isInBounds() const;
1099
1100 /// Accumulate the constant address offset of this GEP if possible.
1101 ///
1102 /// This routine accepts an APInt into which it will accumulate the constant
1103 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1104 /// all-constant, it returns false and the value of the offset APInt is
1105 /// undefined (it is *not* preserved!). The APInt passed into this routine
1106 /// must be at least as wide as the IntPtr type for the address space of
1107 /// the base GEP pointer.
1108 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1109
1110 // Methods for support type inquiry through isa, cast, and dyn_cast:
1111 static bool classof(const Instruction *I) {
1112 return (I->getOpcode() == Instruction::GetElementPtr);
1113 }
1114 static bool classof(const Value *V) {
1115 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1116 }
1117};
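// --- Usage sketch (editor's addition, not part of Instructions.h) ---
// Creating an inbounds GEP and recovering its constant byte offset with
// accumulateConstantOffset(). StructTy, BasePtr, Zero, FieldNo, DL and
// InsertPt are assumed to exist (the indices here are constant i32 values).
//
//   Value *Idx[] = {Zero, FieldNo};
//   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
//       StructTy, BasePtr, Idx, "field.addr", InsertPt);
//   APInt Offset(DL.getPointerSizeInBits(GEP->getPointerAddressSpace()), 0);
//   bool HasConstOffset = GEP->accumulateConstantOffset(DL, Offset);
//
// When HasConstOffset is true, Offset holds the byte offset of the field;
// otherwise its contents are unspecified, as the comment above warns.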
1118
1119template <>
1120struct OperandTraits<GetElementPtrInst> :
1121 public VariadicOperandTraits<GetElementPtrInst, 1> {
1122};
1123
1124GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1125 ArrayRef<Value *> IdxList, unsigned Values,
1126 const Twine &NameStr,
1127 Instruction *InsertBefore)
1128 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1129 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1130 Values, InsertBefore),
1131 SourceElementType(PointeeType),
1132 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1133 assert(ResultElementType ==
1134 cast<PointerType>(getType()->getScalarType())->getElementType());
1135 init(Ptr, IdxList, NameStr);
1136}
1137
1138GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1139 ArrayRef<Value *> IdxList, unsigned Values,
1140 const Twine &NameStr,
1141 BasicBlock *InsertAtEnd)
1142 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1143 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1144 Values, InsertAtEnd),
1145 SourceElementType(PointeeType),
1146 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1147 assert(ResultElementType ==
1148 cast<PointerType>(getType()->getScalarType())->getElementType());
1149 init(Ptr, IdxList, NameStr);
1150}
1151
1152DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1153
1154//===----------------------------------------------------------------------===//
1155// ICmpInst Class
1156//===----------------------------------------------------------------------===//
1157
1158/// This instruction compares its operands according to the predicate given
1159/// to the constructor. It only operates on integers or pointers. The operands
1160/// must be identical types.
1161/// Represent an integer comparison operator.
1162class ICmpInst: public CmpInst {
1163 void AssertOK() {
1164 assert(isIntPredicate() &&
1165 "Invalid ICmp predicate value");
1166 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1167 "Both operands to ICmp instruction are not of the same type!");
1168 // Check that the operands are the right type
1169 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1170 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1171 "Invalid operand types for ICmp instruction");
1172 }
1173
1174protected:
1175 // Note: Instruction needs to be a friend here to call cloneImpl.
1176 friend class Instruction;
1177
1178 /// Clone an identical ICmpInst
1179 ICmpInst *cloneImpl() const;
1180
1181public:
1182 /// Constructor with insert-before-instruction semantics.
1183 ICmpInst(
1184 Instruction *InsertBefore, ///< Where to insert
1185 Predicate pred, ///< The predicate to use for the comparison
1186 Value *LHS, ///< The left-hand-side of the expression
1187 Value *RHS, ///< The right-hand-side of the expression
1188 const Twine &NameStr = "" ///< Name of the instruction
1189 ) : CmpInst(makeCmpResultType(LHS->getType()),
1190 Instruction::ICmp, pred, LHS, RHS, NameStr,
1191 InsertBefore) {
1192#ifndef NDEBUG
1193 AssertOK();
1194#endif
1195 }
1196
1197 /// Constructor with insert-at-end semantics.
1198 ICmpInst(
1199 BasicBlock &InsertAtEnd, ///< Block to insert into.
1200 Predicate pred, ///< The predicate to use for the comparison
1201 Value *LHS, ///< The left-hand-side of the expression
1202 Value *RHS, ///< The right-hand-side of the expression
1203 const Twine &NameStr = "" ///< Name of the instruction
1204 ) : CmpInst(makeCmpResultType(LHS->getType()),
1205 Instruction::ICmp, pred, LHS, RHS, NameStr,
1206 &InsertAtEnd) {
1207#ifndef NDEBUG
1208 AssertOK();
1209#endif
1210 }
1211
1212 /// Constructor with no-insertion semantics
1213 ICmpInst(
1214 Predicate pred, ///< The predicate to use for the comparison
1215 Value *LHS, ///< The left-hand-side of the expression
1216 Value *RHS, ///< The right-hand-side of the expression
1217 const Twine &NameStr = "" ///< Name of the instruction
1218 ) : CmpInst(makeCmpResultType(LHS->getType()),
1219 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1220#ifndef NDEBUG
1221 AssertOK();
1222#endif
1223 }
1224
1225 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1226 /// @returns the predicate that would be the result if the operand were
1227 /// regarded as signed.
1228 /// Return the signed version of the predicate
1229 Predicate getSignedPredicate() const {
1230 return getSignedPredicate(getPredicate());
1231 }
1232
1233 /// This is a static version that you can use without an instruction.
1234 /// Return the signed version of the predicate.
1235 static Predicate getSignedPredicate(Predicate pred);
1236
1237 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1238 /// @returns the predicate that would be the result if the operand were
1239 /// regarded as unsigned.
1240 /// Return the unsigned version of the predicate
1241 Predicate getUnsignedPredicate() const {
1242 return getUnsignedPredicate(getPredicate());
1243 }
1244
1245 /// This is a static version that you can use without an instruction.
1246 /// Return the unsigned version of the predicate.
1247 static Predicate getUnsignedPredicate(Predicate pred);
1248
1249 /// Return true if this predicate is either EQ or NE. This also
1250 /// tests for commutativity.
1251 static bool isEquality(Predicate P) {
1252 return P == ICMP_EQ || P == ICMP_NE;
1253 }
1254
1255 /// Return true if this predicate is either EQ or NE. This also
1256 /// tests for commutativity.
1257 bool isEquality() const {
1258 return isEquality(getPredicate());
1259 }
1260
1261 /// @returns true if the predicate of this ICmpInst is commutative
1262 /// Determine if this relation is commutative.
1263 bool isCommutative() const { return isEquality(); }
1264
1265 /// Return true if the predicate is relational (not EQ or NE).
1266 ///
1267 bool isRelational() const {
1268 return !isEquality();
1269 }
1270
1271 /// Return true if the predicate is relational (not EQ or NE).
1272 ///
1273 static bool isRelational(Predicate P) {
1274 return !isEquality(P);
1275 }
1276
1277 /// Exchange the two operands to this instruction in such a way that it does
1278 /// not modify the semantics of the instruction. The predicate value may be
1279 /// changed to retain the same result if the predicate is order dependent
1280 /// (e.g. ult).
1281 /// Swap operands and adjust predicate.
1282 void swapOperands() {
1283 setPredicate(getSwappedPredicate());
1284 Op<0>().swap(Op<1>());
1285 }
1286
1287 // Methods for support type inquiry through isa, cast, and dyn_cast:
1288 static bool classof(const Instruction *I) {
1289 return I->getOpcode() == Instruction::ICmp;
1290 }
1291 static bool classof(const Value *V) {
1292 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1293 }
1294};
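// --- Usage sketch (editor's addition, not part of Instructions.h) ---
// Building an unsigned integer comparison with the no-insertion constructor
// and querying the predicate helpers above. A and B are assumed to be two
// Value* of the same integer type.
//
//   ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_ULT, A, B, "cmp");
//   CmpInst::Predicate SP = Cmp->getSignedPredicate();  // ICMP_SLT here
//   Cmp->swapOperands();  // predicate becomes ICMP_UGT, semantics preserved
//
// isEquality() is false for this predicate, so isCommutative() is false as
// well and isRelational() is true.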
1295
1296//===----------------------------------------------------------------------===//
1297// FCmpInst Class
1298//===----------------------------------------------------------------------===//
1299
1300/// This instruction compares its operands according to the predicate given
1301/// to the constructor. It only operates on floating point values or packed
1302/// vectors of floating point values. The operands must be identical types.
1303/// Represents a floating point comparison operator.
1304class FCmpInst: public CmpInst {
1305 void AssertOK() {
1306    assert(isFPPredicate() && "Invalid FCmp predicate value");
1307    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1308           "Both operands to FCmp instruction are not of the same type!");
1309 // Check that the operands are the right type
1310    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1311           "Invalid operand types for FCmp instruction");
1312 }
1313
1314protected:
1315 // Note: Instruction needs to be a friend here to call cloneImpl.
1316 friend class Instruction;
1317
1318 /// Clone an identical FCmpInst
1319 FCmpInst *cloneImpl() const;
1320
1321public:
1322 /// Constructor with insert-before-instruction semantics.
1323 FCmpInst(
1324 Instruction *InsertBefore, ///< Where to insert
1325 Predicate pred, ///< The predicate to use for the comparison
1326 Value *LHS, ///< The left-hand-side of the expression
1327 Value *RHS, ///< The right-hand-side of the expression
1328 const Twine &NameStr = "" ///< Name of the instruction
1329 ) : CmpInst(makeCmpResultType(LHS->getType()),
1330 Instruction::FCmp, pred, LHS, RHS, NameStr,
1331 InsertBefore) {
1332 AssertOK();
1333 }
1334
1335 /// Constructor with insert-at-end semantics.
1336 FCmpInst(
1337 BasicBlock &InsertAtEnd, ///< Block to insert into.
1338 Predicate pred, ///< The predicate to use for the comparison
1339 Value *LHS, ///< The left-hand-side of the expression
1340 Value *RHS, ///< The right-hand-side of the expression
1341 const Twine &NameStr = "" ///< Name of the instruction
1342 ) : CmpInst(makeCmpResultType(LHS->getType()),
1343 Instruction::FCmp, pred, LHS, RHS, NameStr,
1344 &InsertAtEnd) {
1345 AssertOK();
1346 }
1347
1348 /// Constructor with no-insertion semantics
1349 FCmpInst(
1350 Predicate Pred, ///< The predicate to use for the comparison
1351 Value *LHS, ///< The left-hand-side of the expression
1352 Value *RHS, ///< The right-hand-side of the expression
1353 const Twine &NameStr = "", ///< Name of the instruction
1354 Instruction *FlagsSource = nullptr
1355 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1356 RHS, NameStr, nullptr, FlagsSource) {
1357 AssertOK();
1358 }
1359
1360 /// @returns true if the predicate of this instruction is EQ or NE.
1361 /// Determine if this is an equality predicate.
1362 static bool isEquality(Predicate Pred) {
1363 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1364 Pred == FCMP_UNE;
1365 }
1366
1367 /// @returns true if the predicate of this instruction is EQ or NE.
1368 /// Determine if this is an equality predicate.
1369 bool isEquality() const { return isEquality(getPredicate()); }
1370
1371 /// @returns true if the predicate of this instruction is commutative.
1372 /// Determine if this is a commutative predicate.
1373 bool isCommutative() const {
1374 return isEquality() ||
1375 getPredicate() == FCMP_FALSE ||
1376 getPredicate() == FCMP_TRUE ||
1377 getPredicate() == FCMP_ORD ||
1378 getPredicate() == FCMP_UNO;
1379 }
1380
1381 /// @returns true if the predicate is relational (not EQ or NE).
1382 /// Determine if this a relational predicate.
1383 bool isRelational() const { return !isEquality(); }
1384
1385 /// Exchange the two operands to this instruction in such a way that it does
1386 /// not modify the semantics of the instruction. The predicate value may be
1387 /// changed to retain the same result if the predicate is order dependent
1388 /// (e.g. ult).
1389 /// Swap operands and adjust predicate.
1390 void swapOperands() {
1391 setPredicate(getSwappedPredicate());
1392 Op<0>().swap(Op<1>());
1393 }
1394
1395 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1396 static bool classof(const Instruction *I) {
1397 return I->getOpcode() == Instruction::FCmp;
1398 }
1399 static bool classof(const Value *V) {
1400 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1401 }
1402};
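
A similar sketch for the FCmp helpers (assumes the same Builder and two float-typed Values X and Y; illustrative only):

  // oeq is an equality predicate, so operand order does not matter.
  Value *C = Builder.CreateFCmpOEQ(X, Y, "feq");
  if (auto *FC = dyn_cast<FCmpInst>(C))
    assert(FC->isEquality() && FC->isCommutative());
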
1403
1404//===----------------------------------------------------------------------===//
1405/// This class represents a function call, abstracting a target
1406/// machine's calling convention. This class uses the low bit of the SubClassData
1407/// field to indicate whether or not this is a tail call. The rest of the bits
1408/// hold the calling convention of the call.
1409///
1410class CallInst : public CallBase {
1411 CallInst(const CallInst &CI);
1412
1413 /// Construct a CallInst given a range of arguments.
1414 /// Construct a CallInst from a range of arguments
1415 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1416 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1417 Instruction *InsertBefore);
1418
1419 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1420 const Twine &NameStr, Instruction *InsertBefore)
1421 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1422
1423 /// Construct a CallInst given a range of arguments.
1424 /// Construct a CallInst from a range of arguments
1425 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1426 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1427 BasicBlock *InsertAtEnd);
1428
1429 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1430 Instruction *InsertBefore);
1431
1432 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1433 BasicBlock *InsertAtEnd);
1434
1435 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1436 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1437 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1438
1439 /// Compute the number of operands to allocate.
1440 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1441 // We need one operand for the called function, plus the input operand
1442 // counts provided.
1443 return 1 + NumArgs + NumBundleInputs;
1444 }
1445
1446protected:
1447 // Note: Instruction needs to be a friend here to call cloneImpl.
1448 friend class Instruction;
1449
1450 CallInst *cloneImpl() const;
1451
1452public:
1453 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1454 Instruction *InsertBefore = nullptr) {
1455 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1456 }
1457
1458 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1459 const Twine &NameStr,
1460 Instruction *InsertBefore = nullptr) {
1461 return new (ComputeNumOperands(Args.size()))
1462 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1463 }
1464
1465 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1466 ArrayRef<OperandBundleDef> Bundles = None,
1467 const Twine &NameStr = "",
1468 Instruction *InsertBefore = nullptr) {
1469 const int NumOperands =
1470 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1471 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1472
1473 return new (NumOperands, DescriptorBytes)
1474 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1475 }
1476
1477 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1478 BasicBlock *InsertAtEnd) {
1479 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1480 }
1481
1482 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1483 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1484 return new (ComputeNumOperands(Args.size()))
1485 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1486 }
1487
1488 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1489 ArrayRef<OperandBundleDef> Bundles,
1490 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1491 const int NumOperands =
1492 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1493 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1494
1495 return new (NumOperands, DescriptorBytes)
1496 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1497 }
1498
1499 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1500 Instruction *InsertBefore = nullptr) {
1501 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1502 InsertBefore);
1503 }
1504
1505 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1506 ArrayRef<OperandBundleDef> Bundles = None,
1507 const Twine &NameStr = "",
1508 Instruction *InsertBefore = nullptr) {
1509 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1510 NameStr, InsertBefore);
1511 }
1512
1513 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1514 const Twine &NameStr,
1515 Instruction *InsertBefore = nullptr) {
1516 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1517 InsertBefore);
1518 }
1519
1520 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1521 BasicBlock *InsertAtEnd) {
1522 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1523 InsertAtEnd);
1524 }
1525
1526 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1527 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1528 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1529 InsertAtEnd);
1530 }
1531
1532 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1533 ArrayRef<OperandBundleDef> Bundles,
1534 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1535 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1536 NameStr, InsertAtEnd);
1537 }
1538
1539 // Deprecated [opaque pointer types]
1540 static CallInst *Create(Value *Func, const Twine &NameStr = "",
1541 Instruction *InsertBefore = nullptr) {
1542 return Create(cast<FunctionType>(
1543 cast<PointerType>(Func->getType())->getElementType()),
1544 Func, NameStr, InsertBefore);
1545 }
1546
1547 // Deprecated [opaque pointer types]
1548 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1549 const Twine &NameStr,
1550 Instruction *InsertBefore = nullptr) {
1551 return Create(cast<FunctionType>(
1552 cast<PointerType>(Func->getType())->getElementType()),
1553 Func, Args, NameStr, InsertBefore);
1554 }
1555
1556 // Deprecated [opaque pointer types]
1557 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1558 ArrayRef<OperandBundleDef> Bundles = None,
1559 const Twine &NameStr = "",
1560 Instruction *InsertBefore = nullptr) {
1561 return Create(cast<FunctionType>(
1562 cast<PointerType>(Func->getType())->getElementType()),
1563 Func, Args, Bundles, NameStr, InsertBefore);
1564 }
1565
1566 // Deprecated [opaque pointer types]
1567 static CallInst *Create(Value *Func, const Twine &NameStr,
1568 BasicBlock *InsertAtEnd) {
1569 return Create(cast<FunctionType>(
1570 cast<PointerType>(Func->getType())->getElementType()),
1571 Func, NameStr, InsertAtEnd);
1572 }
1573
1574 // Deprecated [opaque pointer types]
1575 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1576 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1577 return Create(cast<FunctionType>(
1578 cast<PointerType>(Func->getType())->getElementType()),
1579 Func, Args, NameStr, InsertAtEnd);
1580 }
1581
1582 // Deprecated [opaque pointer types]
1583 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1584 ArrayRef<OperandBundleDef> Bundles,
1585 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1586 return Create(cast<FunctionType>(
1587 cast<PointerType>(Func->getType())->getElementType()),
1588 Func, Args, Bundles, NameStr, InsertAtEnd);
1589 }
1590
1591 /// Create a clone of \p CI with a different set of operand bundles and
1592 /// insert it before \p InsertPt.
1593 ///
1594  /// The returned call instruction is identical to \p CI in every way except that
1595 /// the operand bundles for the new instruction are set to the operand bundles
1596 /// in \p Bundles.
1597 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1598 Instruction *InsertPt = nullptr);
1599
1600 /// Generate the IR for a call to malloc:
1601 /// 1. Compute the malloc call's argument as the specified type's size,
1602 /// possibly multiplied by the array size if the array size is not
1603 /// constant 1.
1604 /// 2. Call malloc with that argument.
1605 /// 3. Bitcast the result of the malloc call to the specified type.
1606 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1607 Type *AllocTy, Value *AllocSize,
1608 Value *ArraySize = nullptr,
1609 Function *MallocF = nullptr,
1610 const Twine &Name = "");
1611 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1612 Type *AllocTy, Value *AllocSize,
1613 Value *ArraySize = nullptr,
1614 Function *MallocF = nullptr,
1615 const Twine &Name = "");
1616 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1617 Type *AllocTy, Value *AllocSize,
1618 Value *ArraySize = nullptr,
1619 ArrayRef<OperandBundleDef> Bundles = None,
1620 Function *MallocF = nullptr,
1621 const Twine &Name = "");
1622 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1623 Type *AllocTy, Value *AllocSize,
1624 Value *ArraySize = nullptr,
1625 ArrayRef<OperandBundleDef> Bundles = None,
1626 Function *MallocF = nullptr,
1627 const Twine &Name = "");
1628 /// Generate the IR for a call to the builtin free function.
1629 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1630 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1631 static Instruction *CreateFree(Value *Source,
1632 ArrayRef<OperandBundleDef> Bundles,
1633 Instruction *InsertBefore);
1634 static Instruction *CreateFree(Value *Source,
1635 ArrayRef<OperandBundleDef> Bundles,
1636 BasicBlock *InsertAtEnd);
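
For orientation, a sketch of how the malloc and free helpers above might be driven (assumes an LLVMContext Ctx and an insertion point InsertPt; the element size is hard-coded here rather than taken from a DataLayout):

  // Emit the equivalent of "malloc(8 * 16)" bitcast to i64*, then free it.
  Type *I64 = Type::getInt64Ty(Ctx);
  Value *ElemSize = ConstantInt::get(I64, 8);   // sizeof(i64) in bytes
  Value *Count    = ConstantInt::get(I64, 16);
  Instruction *Buf = CallInst::CreateMalloc(InsertPt, /*IntPtrTy=*/I64, I64,
                                            ElemSize, Count, nullptr, "buf");
  CallInst::CreateFree(Buf, InsertPt);
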
1637
1638 // Note that 'musttail' implies 'tail'.
1639 enum TailCallKind {
1640 TCK_None = 0,
1641 TCK_Tail = 1,
1642 TCK_MustTail = 2,
1643 TCK_NoTail = 3
1644 };
1645 TailCallKind getTailCallKind() const {
1646 return TailCallKind(getSubclassDataFromInstruction() & 3);
1647 }
1648
1649 bool isTailCall() const {
1650 unsigned Kind = getSubclassDataFromInstruction() & 3;
1651 return Kind == TCK_Tail || Kind == TCK_MustTail;
36   Assuming 'Kind' is equal to TCK_Tail
37   Returning the value 1, which participates in a condition later
1652 }
1653
1654 bool isMustTailCall() const {
1655 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
1656 }
1657
1658 bool isNoTailCall() const {
1659 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
1660 }
1661
1662 void setTailCall(bool isTC = true) {
1663 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
1664 unsigned(isTC ? TCK_Tail : TCK_None));
1665 }
1666
1667 void setTailCallKind(TailCallKind TCK) {
1668 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
1669 unsigned(TCK));
1670 }
1671
1672 /// Return true if the call can return twice
1673 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1674 void setCanReturnTwice() {
1675 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1676 }
1677
1678 // Methods for support type inquiry through isa, cast, and dyn_cast:
1679 static bool classof(const Instruction *I) {
1680 return I->getOpcode() == Instruction::Call;
1681 }
1682 static bool classof(const Value *V) {
1683 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1684 }
1685
1686 /// Updates profile metadata by scaling it by \p S / \p T.
1687 void updateProfWeight(uint64_t S, uint64_t T);
1688
1689private:
1690 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1691 // method so that subclasses cannot accidentally use it.
1692 void setInstructionSubclassData(unsigned short D) {
1693 Instruction::setInstructionSubclassData(D);
1694 }
1695};
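
Since the bug path above hinges on isTailCall(), here is a minimal sketch of how the tail-call kind bits get set and queried (assumes an existing IRBuilder<> Builder, a Function *Callee, and argument Values Arg0/Arg1; this is illustrative and not taken from TailRecursionElimination.cpp itself):

  CallInst *CI = Builder.CreateCall(Callee, {Arg0, Arg1}, "call");
  CI->setTailCallKind(CallInst::TCK_Tail);     // stored in the low two subclass-data bits
  assert(CI->isTailCall() && !CI->isMustTailCall() && !CI->isNoTailCall());
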
1696
1697CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1698 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1699 BasicBlock *InsertAtEnd)
1700 : CallBase(Ty->getReturnType(), Instruction::Call,
1701 OperandTraits<CallBase>::op_end(this) -
1702 (Args.size() + CountBundleInputs(Bundles) + 1),
1703 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1704 InsertAtEnd) {
1705 init(Ty, Func, Args, Bundles, NameStr);
1706}
1707
1708CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1709 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1710 Instruction *InsertBefore)
1711 : CallBase(Ty->getReturnType(), Instruction::Call,
1712 OperandTraits<CallBase>::op_end(this) -
1713 (Args.size() + CountBundleInputs(Bundles) + 1),
1714 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1715 InsertBefore) {
1716 init(Ty, Func, Args, Bundles, NameStr);
1717}
1718
1719//===----------------------------------------------------------------------===//
1720// SelectInst Class
1721//===----------------------------------------------------------------------===//
1722
1723/// This class represents the LLVM 'select' instruction.
1724///
1725class SelectInst : public Instruction {
1726 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1727 Instruction *InsertBefore)
1728 : Instruction(S1->getType(), Instruction::Select,
1729 &Op<0>(), 3, InsertBefore) {
1730 init(C, S1, S2);
1731 setName(NameStr);
1732 }
1733
1734 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1735 BasicBlock *InsertAtEnd)
1736 : Instruction(S1->getType(), Instruction::Select,
1737 &Op<0>(), 3, InsertAtEnd) {
1738 init(C, S1, S2);
1739 setName(NameStr);
1740 }
1741
1742 void init(Value *C, Value *S1, Value *S2) {
1743    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1744 Op<0>() = C;
1745 Op<1>() = S1;
1746 Op<2>() = S2;
1747 }
1748
1749protected:
1750 // Note: Instruction needs to be a friend here to call cloneImpl.
1751 friend class Instruction;
1752
1753 SelectInst *cloneImpl() const;
1754
1755public:
1756 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1757 const Twine &NameStr = "",
1758 Instruction *InsertBefore = nullptr,
1759 Instruction *MDFrom = nullptr) {
1760 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1761 if (MDFrom)
1762 Sel->copyMetadata(*MDFrom);
1763 return Sel;
1764 }
1765
1766 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1767 const Twine &NameStr,
1768 BasicBlock *InsertAtEnd) {
1769 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1770 }
1771
1772 const Value *getCondition() const { return Op<0>(); }
1773 const Value *getTrueValue() const { return Op<1>(); }
1774 const Value *getFalseValue() const { return Op<2>(); }
1775 Value *getCondition() { return Op<0>(); }
1776 Value *getTrueValue() { return Op<1>(); }
1777 Value *getFalseValue() { return Op<2>(); }
1778
1779 void setCondition(Value *V) { Op<0>() = V; }
1780 void setTrueValue(Value *V) { Op<1>() = V; }
1781 void setFalseValue(Value *V) { Op<2>() = V; }
1782
1783 /// Swap the true and false values of the select instruction.
1784 /// This doesn't swap prof metadata.
1785 void swapValues() { Op<1>().swap(Op<2>()); }
1786
1787 /// Return a string if the specified operands are invalid
1788 /// for a select operation, otherwise return null.
1789 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1790
1791 /// Transparently provide more efficient getOperand methods.
1792  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1793
1794 OtherOps getOpcode() const {
1795 return static_cast<OtherOps>(Instruction::getOpcode());
1796 }
1797
1798 // Methods for support type inquiry through isa, cast, and dyn_cast:
1799 static bool classof(const Instruction *I) {
1800 return I->getOpcode() == Instruction::Select;
1801 }
1802 static bool classof(const Value *V) {
1803 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1804 }
1805};
1806
1807template <>
1808struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1809};
1810
1811DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1812
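
A short sketch of the SelectInst accessors above (assumes Cond is an i1 Value, A and B share a type, and InsertBefore is an existing Instruction*; illustrative only):

  SelectInst *Sel = SelectInst::Create(Cond, A, B, "sel", InsertBefore);
  Sel->swapValues();                      // true/false operands exchanged
  assert(Sel->getTrueValue() == B && Sel->getFalseValue() == A);
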
1813//===----------------------------------------------------------------------===//
1814// VAArgInst Class
1815//===----------------------------------------------------------------------===//
1816
1817/// This class represents the va_arg llvm instruction, which returns
1818/// an argument of the specified type given a va_list and increments that list
1819///
1820class VAArgInst : public UnaryInstruction {
1821protected:
1822 // Note: Instruction needs to be a friend here to call cloneImpl.
1823 friend class Instruction;
1824
1825 VAArgInst *cloneImpl() const;
1826
1827public:
1828 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1829 Instruction *InsertBefore = nullptr)
1830 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1831 setName(NameStr);
1832 }
1833
1834 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1835 BasicBlock *InsertAtEnd)
1836 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1837 setName(NameStr);
1838 }
1839
1840 Value *getPointerOperand() { return getOperand(0); }
1841 const Value *getPointerOperand() const { return getOperand(0); }
1842 static unsigned getPointerOperandIndex() { return 0U; }
1843
1844 // Methods for support type inquiry through isa, cast, and dyn_cast:
1845 static bool classof(const Instruction *I) {
1846 return I->getOpcode() == VAArg;
1847 }
1848 static bool classof(const Value *V) {
1849 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1850 }
1851};
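
A one-line sketch of the constructor above (assumes VAList is a pointer Value to a va_list, Ctx an LLVMContext, and InsertBefore an Instruction*):

  // Reads the next variadic argument as an i32 and advances the va_list.
  VAArgInst *VA = new VAArgInst(VAList, Type::getInt32Ty(Ctx), "va", InsertBefore);
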
1852
1853//===----------------------------------------------------------------------===//
1854// ExtractElementInst Class
1855//===----------------------------------------------------------------------===//
1856
1857/// This instruction extracts a single (scalar)
1858/// element from a VectorType value
1859///
1860class ExtractElementInst : public Instruction {
1861 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1862 Instruction *InsertBefore = nullptr);
1863 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1864 BasicBlock *InsertAtEnd);
1865
1866protected:
1867 // Note: Instruction needs to be a friend here to call cloneImpl.
1868 friend class Instruction;
1869
1870 ExtractElementInst *cloneImpl() const;
1871
1872public:
1873 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1874 const Twine &NameStr = "",
1875 Instruction *InsertBefore = nullptr) {
1876 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1877 }
1878
1879 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1880 const Twine &NameStr,
1881 BasicBlock *InsertAtEnd) {
1882 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1883 }
1884
1885 /// Return true if an extractelement instruction can be
1886 /// formed with the specified operands.
1887 static bool isValidOperands(const Value *Vec, const Value *Idx);
1888
1889 Value *getVectorOperand() { return Op<0>(); }
1890 Value *getIndexOperand() { return Op<1>(); }
1891 const Value *getVectorOperand() const { return Op<0>(); }
1892 const Value *getIndexOperand() const { return Op<1>(); }
1893
1894 VectorType *getVectorOperandType() const {
1895 return cast<VectorType>(getVectorOperand()->getType());
1896 }
1897
1898 /// Transparently provide more efficient getOperand methods.
1899  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1900
1901 // Methods for support type inquiry through isa, cast, and dyn_cast:
1902 static bool classof(const Instruction *I) {
1903 return I->getOpcode() == Instruction::ExtractElement;
1904 }
1905 static bool classof(const Value *V) {
1906 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1907 }
1908};
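
A minimal sketch of the interface above (Vec is a vector-typed Value, Idx an integer Value, InsertBefore an Instruction*; all assumed names, not part of the header):

  if (ExtractElementInst::isValidOperands(Vec, Idx))
    ExtractElementInst::Create(Vec, Idx, "lane", InsertBefore);
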
1909
1910template <>
1911struct OperandTraits<ExtractElementInst> :
1912 public FixedNumOperandTraits<ExtractElementInst, 2> {
1913};
1914
1915DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1916
1917//===----------------------------------------------------------------------===//
1918// InsertElementInst Class
1919//===----------------------------------------------------------------------===//
1920
1921/// This instruction inserts a single (scalar)
1922/// element into a VectorType value
1923///
1924class InsertElementInst : public Instruction {
1925 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1926 const Twine &NameStr = "",
1927 Instruction *InsertBefore = nullptr);
1928 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1929 BasicBlock *InsertAtEnd);
1930
1931protected:
1932 // Note: Instruction needs to be a friend here to call cloneImpl.
1933 friend class Instruction;
1934
1935 InsertElementInst *cloneImpl() const;
1936
1937public:
1938 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1939 const Twine &NameStr = "",
1940 Instruction *InsertBefore = nullptr) {
1941 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1942 }
1943
1944 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1945 const Twine &NameStr,
1946 BasicBlock *InsertAtEnd) {
1947 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1948 }
1949
1950 /// Return true if an insertelement instruction can be
1951 /// formed with the specified operands.
1952 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1953 const Value *Idx);
1954
1955 /// Overload to return most specific vector type.
1956 ///
1957 VectorType *getType() const {
1958 return cast<VectorType>(Instruction::getType());
1959 }
1960
1961 /// Transparently provide more efficient getOperand methods.
1962  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1963
1964 // Methods for support type inquiry through isa, cast, and dyn_cast:
1965 static bool classof(const Instruction *I) {
1966 return I->getOpcode() == Instruction::InsertElement;
1967 }
1968 static bool classof(const Value *V) {
1969 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1970 }
1971};
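
The matching sketch for insertion (Vec a vector-typed Value, Scalar a Value of its element type, Idx an integer Value, InsertBefore an Instruction*; assumptions only):

  if (InsertElementInst::isValidOperands(Vec, Scalar, Idx))
    InsertElementInst::Create(Vec, Scalar, Idx, "ins", InsertBefore);
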
1972
1973template <>
1974struct OperandTraits<InsertElementInst> :
1975 public FixedNumOperandTraits<InsertElementInst, 3> {
1976};
1977
1978DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1979
1980//===----------------------------------------------------------------------===//
1981// ShuffleVectorInst Class
1982//===----------------------------------------------------------------------===//
1983
1984/// This instruction constructs a fixed permutation of two
1985/// input vectors.
1986///
1987class ShuffleVectorInst : public Instruction {
1988protected:
1989 // Note: Instruction needs to be a friend here to call cloneImpl.
1990 friend class Instruction;
1991
1992 ShuffleVectorInst *cloneImpl() const;
1993
1994public:
1995 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1996 const Twine &NameStr = "",
1997                    Instruction *InsertBefore = nullptr);
1998 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1999 const Twine &NameStr, BasicBlock *InsertAtEnd);
2000
2001 // allocate space for exactly three operands
2002 void *operator new(size_t s) {
2003 return User::operator new(s, 3);
2004 }
2005
2006 /// Swap the first 2 operands and adjust the mask to preserve the semantics
2007 /// of the instruction.
2008 void commute();
2009
2010 /// Return true if a shufflevector instruction can be
2011 /// formed with the specified operands.
2012 static bool isValidOperands(const Value *V1, const Value *V2,
2013 const Value *Mask);
2014
2015 /// Overload to return most specific vector type.
2016 ///
2017 VectorType *getType() const {
2018 return cast<VectorType>(Instruction::getType());
2019 }
2020
2021 /// Transparently provide more efficient getOperand methods.
2022  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2023
2024 Constant *getMask() const {
2025 return cast<Constant>(getOperand(2));
2026 }
2027
2028 /// Return the shuffle mask value for the specified element of the mask.
2029 /// Return -1 if the element is undef.
2030 static int getMaskValue(const Constant *Mask, unsigned Elt);
2031
2032 /// Return the shuffle mask value of this instruction for the given element
2033 /// index. Return -1 if the element is undef.
2034 int getMaskValue(unsigned Elt) const {
2035 return getMaskValue(getMask(), Elt);
2036 }
2037
2038 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2039 /// elements of the mask are returned as -1.
2040 static void getShuffleMask(const Constant *Mask,
2041 SmallVectorImpl<int> &Result);
2042
2043 /// Return the mask for this instruction as a vector of integers. Undefined
2044 /// elements of the mask are returned as -1.
2045 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2046 return getShuffleMask(getMask(), Result);
2047 }
2048
2049 SmallVector<int, 16> getShuffleMask() const {
2050 SmallVector<int, 16> Mask;
2051 getShuffleMask(Mask);
2052 return Mask;
2053 }
2054
2055 /// Return true if this shuffle returns a vector with a different number of
2056 /// elements than its source vectors.
2057 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2058 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2059 bool changesLength() const {
2060 unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
2061 unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
2062 return NumSourceElts != NumMaskElts;
2063 }
2064
2065 /// Return true if this shuffle returns a vector with a greater number of
2066 /// elements than its source vectors.
2067 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2068 bool increasesLength() const {
2069 unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
2070 unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
2071 return NumSourceElts < NumMaskElts;
2072 }
2073
2074 /// Return true if this shuffle mask chooses elements from exactly one source
2075 /// vector.
2076 /// Example: <7,5,undef,7>
2077 /// This assumes that vector operands are the same length as the mask.
2078 static bool isSingleSourceMask(ArrayRef<int> Mask);
2079 static bool isSingleSourceMask(const Constant *Mask) {
2080    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2081 SmallVector<int, 16> MaskAsInts;
2082 getShuffleMask(Mask, MaskAsInts);
2083 return isSingleSourceMask(MaskAsInts);
2084 }
2085
2086 /// Return true if this shuffle chooses elements from exactly one source
2087 /// vector without changing the length of that vector.
2088 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2089 /// TODO: Optionally allow length-changing shuffles.
2090 bool isSingleSource() const {
2091 return !changesLength() && isSingleSourceMask(getMask());
2092 }
2093
2094 /// Return true if this shuffle mask chooses elements from exactly one source
2095 /// vector without lane crossings. A shuffle using this mask is not
2096 /// necessarily a no-op because it may change the number of elements from its
2097 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2098 /// Example: <undef,undef,2,3>
2099 static bool isIdentityMask(ArrayRef<int> Mask);
2100 static bool isIdentityMask(const Constant *Mask) {
2101    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2102 SmallVector<int, 16> MaskAsInts;
2103 getShuffleMask(Mask, MaskAsInts);
2104 return isIdentityMask(MaskAsInts);
2105 }
2106
2107 /// Return true if this shuffle chooses elements from exactly one source
2108 /// vector without lane crossings and does not change the number of elements
2109 /// from its input vectors.
2110 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2111 bool isIdentity() const {
2112 return !changesLength() && isIdentityMask(getShuffleMask());
2113 }
2114
2115 /// Return true if this shuffle lengthens exactly one source vector with
2116 /// undefs in the high elements.
2117 bool isIdentityWithPadding() const;
2118
2119 /// Return true if this shuffle extracts the first N elements of exactly one
2120 /// source vector.
2121 bool isIdentityWithExtract() const;
2122
2123 /// Return true if this shuffle concatenates its 2 source vectors. This
2124 /// returns false if either input is undefined. In that case, the shuffle is
2125  /// better classified as an identity with padding operation.
2126 bool isConcat() const;
2127
2128 /// Return true if this shuffle mask chooses elements from its source vectors
2129 /// without lane crossings. A shuffle using this mask would be
2130 /// equivalent to a vector select with a constant condition operand.
2131 /// Example: <4,1,6,undef>
2132 /// This returns false if the mask does not choose from both input vectors.
2133 /// In that case, the shuffle is better classified as an identity shuffle.
2134 /// This assumes that vector operands are the same length as the mask
2135 /// (a length-changing shuffle can never be equivalent to a vector select).
2136 static bool isSelectMask(ArrayRef<int> Mask);
2137 static bool isSelectMask(const Constant *Mask) {
2138    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2139 SmallVector<int, 16> MaskAsInts;
2140 getShuffleMask(Mask, MaskAsInts);
2141 return isSelectMask(MaskAsInts);
2142 }
2143
2144 /// Return true if this shuffle chooses elements from its source vectors
2145 /// without lane crossings and all operands have the same number of elements.
2146 /// In other words, this shuffle is equivalent to a vector select with a
2147 /// constant condition operand.
2148 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2149 /// This returns false if the mask does not choose from both input vectors.
2150 /// In that case, the shuffle is better classified as an identity shuffle.
2151 /// TODO: Optionally allow length-changing shuffles.
2152 bool isSelect() const {
2153 return !changesLength() && isSelectMask(getMask());
2154 }
2155
2156 /// Return true if this shuffle mask swaps the order of elements from exactly
2157 /// one source vector.
2158 /// Example: <7,6,undef,4>
2159 /// This assumes that vector operands are the same length as the mask.
2160 static bool isReverseMask(ArrayRef<int> Mask);
2161 static bool isReverseMask(const Constant *Mask) {
2162    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2163 SmallVector<int, 16> MaskAsInts;
2164 getShuffleMask(Mask, MaskAsInts);
2165 return isReverseMask(MaskAsInts);
2166 }
2167
2168 /// Return true if this shuffle swaps the order of elements from exactly
2169 /// one source vector.
2170 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2171 /// TODO: Optionally allow length-changing shuffles.
2172 bool isReverse() const {
2173 return !changesLength() && isReverseMask(getMask());
2174 }
2175
2176 /// Return true if this shuffle mask chooses all elements with the same value
2177 /// as the first element of exactly one source vector.
2178 /// Example: <4,undef,undef,4>
2179 /// This assumes that vector operands are the same length as the mask.
2180 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2181 static bool isZeroEltSplatMask(const Constant *Mask) {
2182    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2183 SmallVector<int, 16> MaskAsInts;
2184 getShuffleMask(Mask, MaskAsInts);
2185 return isZeroEltSplatMask(MaskAsInts);
2186 }
2187
2188 /// Return true if all elements of this shuffle are the same value as the
2189 /// first element of exactly one source vector without changing the length
2190 /// of that vector.
2191 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2192 /// TODO: Optionally allow length-changing shuffles.
2193 /// TODO: Optionally allow splats from other elements.
2194 bool isZeroEltSplat() const {
2195 return !changesLength() && isZeroEltSplatMask(getMask());
2196 }
2197
2198 /// Return true if this shuffle mask is a transpose mask.
2199 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2200 /// even- or odd-numbered vector elements from two n-dimensional source
2201 /// vectors and write each result into consecutive elements of an
2202 /// n-dimensional destination vector. Two shuffles are necessary to complete
2203 /// the transpose, one for the even elements and another for the odd elements.
2204 /// This description closely follows how the TRN1 and TRN2 AArch64
2205 /// instructions operate.
2206 ///
2207 /// For example, a simple 2x2 matrix can be transposed with:
2208 ///
2209 /// ; Original matrix
2210 /// m0 = < a, b >
2211 /// m1 = < c, d >
2212 ///
2213 /// ; Transposed matrix
2214 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2215 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2216 ///
2217 /// For matrices having greater than n columns, the resulting nx2 transposed
2218 /// matrix is stored in two result vectors such that one vector contains
2219 /// interleaved elements from all the even-numbered rows and the other vector
2220 /// contains interleaved elements from all the odd-numbered rows. For example,
2221 /// a 2x4 matrix can be transposed with:
2222 ///
2223 /// ; Original matrix
2224 /// m0 = < a, b, c, d >
2225 /// m1 = < e, f, g, h >
2226 ///
2227 /// ; Transposed matrix
2228 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2229 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2230 static bool isTransposeMask(ArrayRef<int> Mask);
2231 static bool isTransposeMask(const Constant *Mask) {
2232    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2233 SmallVector<int, 16> MaskAsInts;
2234 getShuffleMask(Mask, MaskAsInts);
2235 return isTransposeMask(MaskAsInts);
2236 }
2237
2238 /// Return true if this shuffle transposes the elements of its inputs without
2239 /// changing the length of the vectors. This operation may also be known as a
2240 /// merge or interleave. See the description for isTransposeMask() for the
2241 /// exact specification.
2242 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2243 bool isTranspose() const {
2244 return !changesLength() && isTransposeMask(getMask());
2245 }
2246
2247 /// Return true if this shuffle mask is an extract subvector mask.
2248 /// A valid extract subvector mask returns a smaller vector from a single
2249 /// source operand. The base extraction index is returned as well.
2250 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2251 int &Index);
2252 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2253 int &Index) {
2254    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2255 SmallVector<int, 16> MaskAsInts;
2256 getShuffleMask(Mask, MaskAsInts);
2257 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2258 }
2259
2260 /// Return true if this shuffle mask is an extract subvector mask.
2261 bool isExtractSubvectorMask(int &Index) const {
2262 int NumSrcElts = Op<0>()->getType()->getVectorNumElements();
2263 return isExtractSubvectorMask(getMask(), NumSrcElts, Index);
2264 }
2265
2266 /// Change values in a shuffle permute mask assuming the two vector operands
2267 /// of length InVecNumElts have swapped position.
2268 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2269 unsigned InVecNumElts) {
2270 for (int &Idx : Mask) {
2271 if (Idx == -1)
2272 continue;
2273 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2274      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2275             "shufflevector mask index out of range");
2276 }
2277 }
2278
2279 // Methods for support type inquiry through isa, cast, and dyn_cast:
2280 static bool classof(const Instruction *I) {
2281 return I->getOpcode() == Instruction::ShuffleVector;
2282 }
2283 static bool classof(const Value *V) {
2284 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2285 }
2286};
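
To make the mask helpers above concrete, a small sketch (SVI is an existing ShuffleVectorInst* whose source vectors have 4 elements; the literal mask is the 2x4 transpose example from the comment):

  SmallVector<int, 16> M = SVI->getShuffleMask();
  bool Splat = SVI->isZeroEltSplat();             // all lanes equal element 0?
  SmallVector<int, 4> Mask = {0, 4, 2, 6};
  ShuffleVectorInst::commuteShuffleMask(Mask, 4); // now {4, 0, 6, 2}
  (void)M; (void)Splat;
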
2287
2288template <>
2289struct OperandTraits<ShuffleVectorInst> :
2290 public FixedNumOperandTraits<ShuffleVectorInst, 3> {
2291};
2292
2293DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2294
2295//===----------------------------------------------------------------------===//
2296// ExtractValueInst Class
2297//===----------------------------------------------------------------------===//
2298
2299/// This instruction extracts a struct member or array
2300/// element value from an aggregate value.
2301///
2302class ExtractValueInst : public UnaryInstruction {
2303 SmallVector<unsigned, 4> Indices;
2304
2305 ExtractValueInst(const ExtractValueInst &EVI);
2306
2307  /// Constructors - Create an extractvalue instruction with a base aggregate
2308 /// value and a list of indices. The first ctor can optionally insert before
2309 /// an existing instruction, the second appends the new instruction to the
2310 /// specified BasicBlock.
2311 inline ExtractValueInst(Value *Agg,
2312 ArrayRef<unsigned> Idxs,
2313 const Twine &NameStr,
2314 Instruction *InsertBefore);
2315 inline ExtractValueInst(Value *Agg,
2316 ArrayRef<unsigned> Idxs,
2317 const Twine &NameStr, BasicBlock *InsertAtEnd);
2318
2319 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2320
2321protected:
2322 // Note: Instruction needs to be a friend here to call cloneImpl.
2323 friend class Instruction;
2324
2325 ExtractValueInst *cloneImpl() const;
2326
2327public:
2328 static ExtractValueInst *Create(Value *Agg,
2329 ArrayRef<unsigned> Idxs,
2330 const Twine &NameStr = "",
2331 Instruction *InsertBefore = nullptr) {
2332 return new
2333 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2334 }
2335
2336 static ExtractValueInst *Create(Value *Agg,
2337 ArrayRef<unsigned> Idxs,
2338 const Twine &NameStr,
2339 BasicBlock *InsertAtEnd) {
2340 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2341 }
2342
2343 /// Returns the type of the element that would be extracted
2344 /// with an extractvalue instruction with the specified parameters.
2345 ///
2346 /// Null is returned if the indices are invalid for the specified type.
2347 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2348
2349 using idx_iterator = const unsigned*;
2350
2351 inline idx_iterator idx_begin() const { return Indices.begin(); }
2352 inline idx_iterator idx_end() const { return Indices.end(); }
2353 inline iterator_range<idx_iterator> indices() const {
2354 return make_range(idx_begin(), idx_end());
2355 }
2356
2357 Value *getAggregateOperand() {
2358 return getOperand(0);
2359 }
2360 const Value *getAggregateOperand() const {
2361 return getOperand(0);
2362 }
2363 static unsigned getAggregateOperandIndex() {
2364 return 0U; // get index for modifying correct operand
2365 }
2366
2367 ArrayRef<unsigned> getIndices() const {
2368 return Indices;
2369 }
2370
2371 unsigned getNumIndices() const {
2372 return (unsigned)Indices.size();
2373 }
2374
2375 bool hasIndices() const {
2376 return true;
2377 }
2378
2379 // Methods for support type inquiry through isa, cast, and dyn_cast:
2380 static bool classof(const Instruction *I) {
2381 return I->getOpcode() == Instruction::ExtractValue;
2382 }
2383 static bool classof(const Value *V) {
2384 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2385 }
2386};
2387
2388ExtractValueInst::ExtractValueInst(Value *Agg,
2389 ArrayRef<unsigned> Idxs,
2390 const Twine &NameStr,
2391 Instruction *InsertBefore)
2392 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2393 ExtractValue, Agg, InsertBefore) {
2394 init(Idxs, NameStr);
2395}
2396
2397ExtractValueInst::ExtractValueInst(Value *Agg,
2398 ArrayRef<unsigned> Idxs,
2399 const Twine &NameStr,
2400 BasicBlock *InsertAtEnd)
2401 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2402 ExtractValue, Agg, InsertAtEnd) {
2403 init(Idxs, NameStr);
2404}
2405
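
A minimal usage sketch for the Create and getIndexedType interface shown above (not part of the analyzed source; ExtractSecondField, Agg and BB are hypothetical names supplied only for illustration):

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Extract element 1 of an aggregate value, appending the new instruction
    // to the end of BB.  getIndexedType returns null when the indices do not
    // fit the aggregate's type, so the helper bails out in that case.
    static Value *ExtractSecondField(Value *Agg, BasicBlock *BB) {
      unsigned Idxs[] = {1};
      if (!ExtractValueInst::getIndexedType(Agg->getType(), Idxs))
        return nullptr;
      return ExtractValueInst::Create(Agg, Idxs, "field1", BB);
    }
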
2406//===----------------------------------------------------------------------===//
2407// InsertValueInst Class
2408//===----------------------------------------------------------------------===//
2409
2410/// This instruction inserts a struct field or array element
2411/// value into an aggregate value.
2412///
2413class InsertValueInst : public Instruction {
2414 SmallVector<unsigned, 4> Indices;
2415
2416 InsertValueInst(const InsertValueInst &IVI);
2417
2418 /// Constructors - Create an insertvalue instruction with a base aggregate
2419 /// value, a value to insert, and a list of indices. The first ctor can
2420 /// optionally insert before an existing instruction, the second appends
2421 /// the new instruction to the specified BasicBlock.
2422 inline InsertValueInst(Value *Agg, Value *Val,
2423 ArrayRef<unsigned> Idxs,
2424 const Twine &NameStr,
2425 Instruction *InsertBefore);
2426 inline InsertValueInst(Value *Agg, Value *Val,
2427 ArrayRef<unsigned> Idxs,
2428 const Twine &NameStr, BasicBlock *InsertAtEnd);
2429
2430 /// Constructors - These two constructors are convenience methods because one
2431 /// and two index insertvalue instructions are so common.
2432 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2433 const Twine &NameStr = "",
2434 Instruction *InsertBefore = nullptr);
2435 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2436 BasicBlock *InsertAtEnd);
2437
2438 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2439 const Twine &NameStr);
2440
2441protected:
2442 // Note: Instruction needs to be a friend here to call cloneImpl.
2443 friend class Instruction;
2444
2445 InsertValueInst *cloneImpl() const;
2446
2447public:
2448 // allocate space for exactly two operands
2449 void *operator new(size_t s) {
2450 return User::operator new(s, 2);
2451 }
2452
2453 static InsertValueInst *Create(Value *Agg, Value *Val,
2454 ArrayRef<unsigned> Idxs,
2455 const Twine &NameStr = "",
2456 Instruction *InsertBefore = nullptr) {
2457 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2458 }
2459
2460 static InsertValueInst *Create(Value *Agg, Value *Val,
2461 ArrayRef<unsigned> Idxs,
2462 const Twine &NameStr,
2463 BasicBlock *InsertAtEnd) {
2464 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2465 }
2466
2467 /// Transparently provide more efficient getOperand methods.
2468 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2469
2470 using idx_iterator = const unsigned*;
2471
2472 inline idx_iterator idx_begin() const { return Indices.begin(); }
2473 inline idx_iterator idx_end() const { return Indices.end(); }
2474 inline iterator_range<idx_iterator> indices() const {
2475 return make_range(idx_begin(), idx_end());
2476 }
2477
2478 Value *getAggregateOperand() {
2479 return getOperand(0);
2480 }
2481 const Value *getAggregateOperand() const {
2482 return getOperand(0);
2483 }
2484 static unsigned getAggregateOperandIndex() {
2485 return 0U; // get index for modifying correct operand
2486 }
2487
2488 Value *getInsertedValueOperand() {
2489 return getOperand(1);
2490 }
2491 const Value *getInsertedValueOperand() const {
2492 return getOperand(1);
2493 }
2494 static unsigned getInsertedValueOperandIndex() {
2495 return 1U; // get index for modifying correct operand
2496 }
2497
2498 ArrayRef<unsigned> getIndices() const {
2499 return Indices;
2500 }
2501
2502 unsigned getNumIndices() const {
2503 return (unsigned)Indices.size();
2504 }
2505
2506 bool hasIndices() const {
2507 return true;
2508 }
2509
2510 // Methods for support type inquiry through isa, cast, and dyn_cast:
2511 static bool classof(const Instruction *I) {
2512 return I->getOpcode() == Instruction::InsertValue;
2513 }
2514 static bool classof(const Value *V) {
2515 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2516 }
2517};
2518
2519template <>
2520struct OperandTraits<InsertValueInst> :
2521 public FixedNumOperandTraits<InsertValueInst, 2> {
2522};
2523
2524InsertValueInst::InsertValueInst(Value *Agg,
2525 Value *Val,
2526 ArrayRef<unsigned> Idxs,
2527 const Twine &NameStr,
2528 Instruction *InsertBefore)
2529 : Instruction(Agg->getType(), InsertValue,
2530 OperandTraits<InsertValueInst>::op_begin(this),
2531 2, InsertBefore) {
2532 init(Agg, Val, Idxs, NameStr);
2533}
2534
2535InsertValueInst::InsertValueInst(Value *Agg,
2536 Value *Val,
2537 ArrayRef<unsigned> Idxs,
2538 const Twine &NameStr,
2539 BasicBlock *InsertAtEnd)
2540 : Instruction(Agg->getType(), InsertValue,
2541 OperandTraits<InsertValueInst>::op_begin(this),
2542 2, InsertAtEnd) {
2543 init(Agg, Val, Idxs, NameStr);
2544}
2545
2546DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2546, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this))[i_nocapture].get()); } void InsertValueInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<InsertValueInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2546, __PRETTY_FUNCTION__)); OperandTraits<InsertValueInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
InsertValueInst::getNumOperands() const { return OperandTraits
<InsertValueInst>::operands(this); } template <int Idx_nocapture
> Use &InsertValueInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &InsertValueInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2547
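
A matching sketch for the InsertValueInst::Create overload listed above (again hypothetical; Agg, NewElt and InsertBefore stand for caller-supplied values):

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Build an aggregate identical to Agg except that its element at index 0
    // is replaced by NewElt; the new instruction is placed before InsertBefore.
    static Value *ReplaceFirstField(Value *Agg, Value *NewElt,
                                    Instruction *InsertBefore) {
      unsigned Idxs[] = {0};
      return InsertValueInst::Create(Agg, NewElt, Idxs, "agg.upd", InsertBefore);
    }
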
2548//===----------------------------------------------------------------------===//
2549// PHINode Class
2550//===----------------------------------------------------------------------===//
2551
2552// PHINode - The PHINode class is used to represent the magical mystical PHI
2553// node, which cannot exist in nature but can be synthesized in a computer
2554// scientist's overactive imagination.
2555//
2556class PHINode : public Instruction {
2557 /// The number of operands actually allocated. NumOperands is
2558 /// the number actually in use.
2559 unsigned ReservedSpace;
2560
2561 PHINode(const PHINode &PN);
2562
2563 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2564 const Twine &NameStr = "",
2565 Instruction *InsertBefore = nullptr)
2566 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2567 ReservedSpace(NumReservedValues) {
2568 setName(NameStr);
2569 allocHungoffUses(ReservedSpace);
2570 }
2571
2572 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2573 BasicBlock *InsertAtEnd)
2574 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2575 ReservedSpace(NumReservedValues) {
2576 setName(NameStr);
2577 allocHungoffUses(ReservedSpace);
2578 }
2579
2580protected:
2581 // Note: Instruction needs to be a friend here to call cloneImpl.
2582 friend class Instruction;
2583
2584 PHINode *cloneImpl() const;
2585
2586 // allocHungoffUses - this is more complicated than the generic
2587 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2588 // values and pointers to the incoming blocks, all in one allocation.
2589 void allocHungoffUses(unsigned N) {
2590 User::allocHungoffUses(N, /* IsPhi */ true);
2591 }
2592
2593public:
2594 /// Constructors - NumReservedValues is a hint for the number of incoming
2595 /// edges that this phi node will have (use 0 if you really have no idea).
2596 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2597 const Twine &NameStr = "",
2598 Instruction *InsertBefore = nullptr) {
2599 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2600 }
2601
2602 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2603 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2604 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2605 }
2606
2607 /// Provide fast operand accessors
2608 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2609
2610 // Block iterator interface. This provides access to the list of incoming
2611 // basic blocks, which parallels the list of incoming values.
2612
2613 using block_iterator = BasicBlock **;
2614 using const_block_iterator = BasicBlock * const *;
2615
2616 block_iterator block_begin() {
2617 Use::UserRef *ref =
2618 reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
2619 return reinterpret_cast<block_iterator>(ref + 1);
2620 }
2621
2622 const_block_iterator block_begin() const {
2623 const Use::UserRef *ref =
2624 reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
2625 return reinterpret_cast<const_block_iterator>(ref + 1);
2626 }
2627
2628 block_iterator block_end() {
2629 return block_begin() + getNumOperands();
2630 }
2631
2632 const_block_iterator block_end() const {
2633 return block_begin() + getNumOperands();
2634 }
2635
2636 iterator_range<block_iterator> blocks() {
2637 return make_range(block_begin(), block_end());
2638 }
2639
2640 iterator_range<const_block_iterator> blocks() const {
2641 return make_range(block_begin(), block_end());
2642 }
2643
2644 op_range incoming_values() { return operands(); }
2645
2646 const_op_range incoming_values() const { return operands(); }
2647
2648 /// Return the number of incoming edges
2649 ///
2650 unsigned getNumIncomingValues() const { return getNumOperands(); }
2651
2652 /// Return incoming value number x
2653 ///
2654 Value *getIncomingValue(unsigned i) const {
2655 return getOperand(i);
2656 }
2657 void setIncomingValue(unsigned i, Value *V) {
2658 assert(V && "PHI node got a null value!")((V && "PHI node got a null value!") ? static_cast<
void> (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2658, __PRETTY_FUNCTION__))
;
2659 assert(getType() == V->getType() &&((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!"
) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2660, __PRETTY_FUNCTION__))
2660 "All operands to PHI node must be the same type as the PHI node!")((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!"
) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2660, __PRETTY_FUNCTION__))
;
2661 setOperand(i, V);
2662 }
2663
2664 static unsigned getOperandNumForIncomingValue(unsigned i) {
2665 return i;
2666 }
2667
2668 static unsigned getIncomingValueNumForOperand(unsigned i) {
2669 return i;
2670 }
2671
2672 /// Return incoming basic block number @p i.
2673 ///
2674 BasicBlock *getIncomingBlock(unsigned i) const {
2675 return block_begin()[i];
2676 }
2677
2678 /// Return incoming basic block corresponding
2679 /// to an operand of the PHI.
2680 ///
2681 BasicBlock *getIncomingBlock(const Use &U) const {
2682 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")((this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? static_cast<void> (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2682, __PRETTY_FUNCTION__))
;
2683 return getIncomingBlock(unsigned(&U - op_begin()));
2684 }
2685
2686 /// Return incoming basic block corresponding
2687 /// to value use iterator.
2688 ///
2689 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2690 return getIncomingBlock(I.getUse());
2691 }
2692
2693 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2694 assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast
<void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2694, __PRETTY_FUNCTION__))
;
2695 block_begin()[i] = BB;
2696 }
2697
2698 /// Replace every incoming basic block \p Old with basic block \p New.
2699 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2700 assert(New && Old && "PHI node got a null basic block!")((New && Old && "PHI node got a null basic block!"
) ? static_cast<void> (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2700, __PRETTY_FUNCTION__))
;
2701 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2702 if (getIncomingBlock(Op) == Old)
2703 setIncomingBlock(Op, New);
2704 }
2705
2706 /// Add an incoming value to the end of the PHI list
2707 ///
2708 void addIncoming(Value *V, BasicBlock *BB) {
2709 if (getNumOperands() == ReservedSpace)
2710 growOperands(); // Get more space!
2711 // Initialize some new operands.
2712 setNumHungOffUseOperands(getNumOperands() + 1);
2713 setIncomingValue(getNumOperands() - 1, V);
2714 setIncomingBlock(getNumOperands() - 1, BB);
2715 }
2716
2717 /// Remove an incoming value. This is useful if a
2718 /// predecessor basic block is deleted. The value removed is returned.
2719 ///
2720 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2721 /// is true), the PHI node is destroyed and any uses of it are replaced with
2722 /// dummy values. The only time there should be zero incoming values to a PHI
2723 /// node is when the block is dead, so this strategy is sound.
2724 ///
2725 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2726
2727 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2728 int Idx = getBasicBlockIndex(BB);
2729 assert(Idx >= 0 && "Invalid basic block argument to remove!")((Idx >= 0 && "Invalid basic block argument to remove!"
) ? static_cast<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2729, __PRETTY_FUNCTION__))
;
2730 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2731 }
2732
2733 /// Return the first index of the specified basic
2734 /// block in the value list for this PHI. Returns -1 if no instance.
2735 ///
2736 int getBasicBlockIndex(const BasicBlock *BB) const {
2737 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2738 if (block_begin()[i] == BB)
2739 return i;
2740 return -1;
2741 }
2742
2743 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2744 int Idx = getBasicBlockIndex(BB);
2745 assert(Idx >= 0 && "Invalid basic block argument!")((Idx >= 0 && "Invalid basic block argument!") ? static_cast
<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2745, __PRETTY_FUNCTION__))
;
2746 return getIncomingValue(Idx);
2747 }
2748
2749 /// Set every incoming value for block \p BB to \p V.
2750 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2751 assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast
<void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2751, __PRETTY_FUNCTION__))
;
2752 bool Found = false;
2753 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2754 if (getIncomingBlock(Op) == BB) {
2755 Found = true;
2756 setIncomingValue(Op, V);
2757 }
2758 (void)Found;
2759 assert(Found && "Invalid basic block argument to set!")((Found && "Invalid basic block argument to set!") ? static_cast
<void> (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2759, __PRETTY_FUNCTION__))
;
2760 }
2761
2762 /// If the specified PHI node always merges together the
2763 /// same value, return the value, otherwise return null.
2764 Value *hasConstantValue() const;
2765
2766 /// Whether the specified PHI node always merges
2767 /// together the same value, assuming undefs are equal to a unique
2768 /// non-undef value.
2769 bool hasConstantOrUndefValue() const;
2770
2771 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2772 static bool classof(const Instruction *I) {
2773 return I->getOpcode() == Instruction::PHI;
2774 }
2775 static bool classof(const Value *V) {
2776 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2777 }
2778
2779private:
2780 void growOperands();
2781};
2782
2783template <>
2784struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2785};
2786
2787DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { ((i_nocapture < OperandTraits<PHINode>::operands
(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2787, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<PHINode>::op_begin(const_cast<PHINode
*>(this))[i_nocapture].get()); } void PHINode::setOperand(
unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<PHINode>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2787, __PRETTY_FUNCTION__)); OperandTraits<PHINode>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode
::getNumOperands() const { return OperandTraits<PHINode>
::operands(this); } template <int Idx_nocapture> Use &
PHINode::Op() { return this->OpFrom<Idx_nocapture>(this
); } template <int Idx_nocapture> const Use &PHINode
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2788
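
A small sketch of the PHINode creation and addIncoming interface documented above (hypothetical helper; the blocks and values are assumed to already exist and ValA/ValB to share a type):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Create a two-way phi at the top of MergeBB that yields ValA when control
    // arrives from BlockA and ValB when it arrives from BlockB.
    static PHINode *BuildMergePhi(BasicBlock *MergeBB, Value *ValA,
                                  BasicBlock *BlockA, Value *ValB,
                                  BasicBlock *BlockB) {
      PHINode *PN = PHINode::Create(ValA->getType(), /*NumReservedValues=*/2,
                                    "merge", &MergeBB->front());
      PN->addIncoming(ValA, BlockA);
      PN->addIncoming(ValB, BlockB);
      return PN;
    }
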
2789//===----------------------------------------------------------------------===//
2790// LandingPadInst Class
2791//===----------------------------------------------------------------------===//
2792
2793//===---------------------------------------------------------------------------
2794/// The landingpad instruction holds all of the information
2795/// necessary to generate correct exception handling. The landingpad instruction
2796/// cannot be moved from the top of a landing pad block, which itself is
2797/// accessible only from the 'unwind' edge of an invoke. This uses the
2798/// SubclassData field in Value to store whether or not the landingpad is a
2799/// cleanup.
2800///
2801class LandingPadInst : public Instruction {
2802 /// The number of operands actually allocated. NumOperands is
2803 /// the number actually in use.
2804 unsigned ReservedSpace;
2805
2806 LandingPadInst(const LandingPadInst &LP);
2807
2808public:
2809 enum ClauseType { Catch, Filter };
2810
2811private:
2812 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2813 const Twine &NameStr, Instruction *InsertBefore);
2814 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2815 const Twine &NameStr, BasicBlock *InsertAtEnd);
2816
2817 // Allocate space for exactly zero operands.
2818 void *operator new(size_t s) {
2819 return User::operator new(s);
2820 }
2821
2822 void growOperands(unsigned Size);
2823 void init(unsigned NumReservedValues, const Twine &NameStr);
2824
2825protected:
2826 // Note: Instruction needs to be a friend here to call cloneImpl.
2827 friend class Instruction;
2828
2829 LandingPadInst *cloneImpl() const;
2830
2831public:
2832 /// Constructors - NumReservedClauses is a hint for the number of incoming
2833 /// clauses that this landingpad will have (use 0 if you really have no idea).
2834 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2835 const Twine &NameStr = "",
2836 Instruction *InsertBefore = nullptr);
2837 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2838 const Twine &NameStr, BasicBlock *InsertAtEnd);
2839
2840 /// Provide fast operand accessors
2841 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2842
2843 /// Return 'true' if this landingpad instruction is a
2844 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2845 /// doesn't catch the exception.
2846 bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
2847
2848 /// Indicate that this landingpad instruction is a cleanup.
2849 void setCleanup(bool V) {
2850 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
2851 (V ? 1 : 0));
2852 }
2853
2854 /// Add a catch or filter clause to the landing pad.
2855 void addClause(Constant *ClauseVal);
2856
2857 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2858 /// determine what type of clause this is.
2859 Constant *getClause(unsigned Idx) const {
2860 return cast<Constant>(getOperandList()[Idx]);
2861 }
2862
2863 /// Return 'true' if the clause at index Idx is a catch clause.
2864 bool isCatch(unsigned Idx) const {
2865 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2866 }
2867
2868 /// Return 'true' if the clause at index Idx is a filter clause.
2869 bool isFilter(unsigned Idx) const {
2870 return isa<ArrayType>(getOperandList()[Idx]->getType());
2871 }
2872
2873 /// Get the number of clauses for this landing pad.
2874 unsigned getNumClauses() const { return getNumOperands(); }
2875
2876 /// Grow the size of the operand list to accommodate the new
2877 /// number of clauses.
2878 void reserveClauses(unsigned Size) { growOperands(Size); }
2879
2880 // Methods for support type inquiry through isa, cast, and dyn_cast:
2881 static bool classof(const Instruction *I) {
2882 return I->getOpcode() == Instruction::LandingPad;
2883 }
2884 static bool classof(const Value *V) {
2885 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2886 }
2887};
2888
2889template <>
2890struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2891};
2892
2893DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2893, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this))[i_nocapture].get()); } void LandingPadInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2893, __PRETTY_FUNCTION__)); OperandTraits<LandingPadInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2894
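
A sketch of the LandingPadInst interface above (hypothetical; RetTy is assumed to be the personality-appropriate result type, CatchTy a constant describing the caught type, and PadBB the landing pad block of an invoke):

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    static LandingPadInst *BuildLandingPad(Type *RetTy, Constant *CatchTy,
                                           BasicBlock *PadBB) {
      LandingPadInst *LP =
          LandingPadInst::Create(RetTy, /*NumReservedClauses=*/1, "lpad", PadBB);
      LP->addClause(CatchTy);  // a catch clause (non-array clause type)
      LP->setCleanup(true);    // also run this pad when merely unwinding
      return LP;
    }
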
2895//===----------------------------------------------------------------------===//
2896// ReturnInst Class
2897//===----------------------------------------------------------------------===//
2898
2899//===---------------------------------------------------------------------------
2900/// Return a value (possibly void), from a function. Execution
2901/// does not continue in this function any longer.
2902///
2903class ReturnInst : public Instruction {
2904 ReturnInst(const ReturnInst &RI);
2905
2906private:
2907 // ReturnInst constructors:
2908 // ReturnInst() - 'ret void' instruction
2909 // ReturnInst( null) - 'ret void' instruction
2910 // ReturnInst(Value* X) - 'ret X' instruction
2911 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2912 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2913 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2914 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2915 //
2916 // NOTE: If the Value* passed is of type void then the constructor behaves as
2917 // if it was passed NULL.
2918 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2919 Instruction *InsertBefore = nullptr);
2920 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2921 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2922
2923protected:
2924 // Note: Instruction needs to be a friend here to call cloneImpl.
2925 friend class Instruction;
2926
2927 ReturnInst *cloneImpl() const;
2928
2929public:
2930 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2931 Instruction *InsertBefore = nullptr) {
2932 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2933 }
2934
2935 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2936 BasicBlock *InsertAtEnd) {
2937 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2938 }
2939
2940 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2941 return new(0) ReturnInst(C, InsertAtEnd);
2942 }
2943
2944 /// Provide fast operand accessors
2945 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2946
2947 /// Convenience accessor. Returns null if there is no return value.
2948 Value *getReturnValue() const {
2949 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2950 }
2951
2952 unsigned getNumSuccessors() const { return 0; }
2953
2954 // Methods for support type inquiry through isa, cast, and dyn_cast:
2955 static bool classof(const Instruction *I) {
2956 return (I->getOpcode() == Instruction::Ret);
2957 }
2958 static bool classof(const Value *V) {
2959 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2960 }
2961
2962private:
2963 BasicBlock *getSuccessor(unsigned idx) const {
2964 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2964)
;
2965 }
2966
2967 void setSuccessor(unsigned idx, BasicBlock *B) {
2968 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2968)
;
2969 }
2970};
2971
2972template <>
2973struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
2974};
2975
2976DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<ReturnInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2976, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst
*>(this))[i_nocapture].get()); } void ReturnInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<ReturnInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 2976, __PRETTY_FUNCTION__)); OperandTraits<ReturnInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ReturnInst
::getNumOperands() const { return OperandTraits<ReturnInst
>::operands(this); } template <int Idx_nocapture> Use
&ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2977
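
A sketch of the ReturnInst::Create overloads above (hypothetical helper; RetVal may be null, which produces a 'ret void'):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    static ReturnInst *TerminateWithReturn(BasicBlock *BB, Value *RetVal) {
      LLVMContext &Ctx = BB->getContext();
      if (RetVal)
        return ReturnInst::Create(Ctx, RetVal, BB);  // 'ret <ty> RetVal'
      return ReturnInst::Create(Ctx, BB);            // 'ret void'
    }
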
2978//===----------------------------------------------------------------------===//
2979// BranchInst Class
2980//===----------------------------------------------------------------------===//
2981
2982//===---------------------------------------------------------------------------
2983/// Conditional or Unconditional Branch instruction.
2984///
2985class BranchInst : public Instruction {
2986 /// Ops list - Branches are strange. The operands are ordered:
2987 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
2988 /// they don't have to check for cond/uncond branchness. These are mostly
2989 /// accessed relative from op_end().
2990 BranchInst(const BranchInst &BI);
2991 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
2992 // BranchInst(BB *B) - 'br B'
2993 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
2994 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
2995 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
2996 // BranchInst(BB* B, BB *I) - 'br B' insert at end
2997 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
2998 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
2999 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3000 Instruction *InsertBefore = nullptr);
3001 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3002 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3003 BasicBlock *InsertAtEnd);
3004
3005 void AssertOK();
3006
3007protected:
3008 // Note: Instruction needs to be a friend here to call cloneImpl.
3009 friend class Instruction;
3010
3011 BranchInst *cloneImpl() const;
3012
3013public:
3014 /// Iterator type that casts an operand to a basic block.
3015 ///
3016 /// This only makes sense because the successors are stored as adjacent
3017 /// operands for branch instructions.
3018 struct succ_op_iterator
3019 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3020 std::random_access_iterator_tag, BasicBlock *,
3021 ptrdiff_t, BasicBlock *, BasicBlock *> {
3022 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3023
3024 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3025 BasicBlock *operator->() const { return operator*(); }
3026 };
3027
3028 /// The const version of `succ_op_iterator`.
3029 struct const_succ_op_iterator
3030 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3031 std::random_access_iterator_tag,
3032 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3033 const BasicBlock *> {
3034 explicit const_succ_op_iterator(const_value_op_iterator I)
3035 : iterator_adaptor_base(I) {}
3036
3037 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3038 const BasicBlock *operator->() const { return operator*(); }
3039 };
3040
3041 static BranchInst *Create(BasicBlock *IfTrue,
3042 Instruction *InsertBefore = nullptr) {
3043 return new(1) BranchInst(IfTrue, InsertBefore);
3044 }
3045
3046 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3047 Value *Cond, Instruction *InsertBefore = nullptr) {
3048 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3049 }
3050
3051 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3052 return new(1) BranchInst(IfTrue, InsertAtEnd);
3053 }
3054
3055 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3056 Value *Cond, BasicBlock *InsertAtEnd) {
3057 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3058 }
3059
3060 /// Transparently provide more efficient getOperand methods.
3061 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3062
3063 bool isUnconditional() const { return getNumOperands() == 1; }
3064 bool isConditional() const { return getNumOperands() == 3; }
3065
3066 Value *getCondition() const {
3067 assert(isConditional() && "Cannot get condition of an uncond branch!")((isConditional() && "Cannot get condition of an uncond branch!"
) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3067, __PRETTY_FUNCTION__))
;
3068 return Op<-3>();
3069 }
3070
3071 void setCondition(Value *V) {
3072 assert(isConditional() && "Cannot set condition of unconditional branch!")((isConditional() && "Cannot set condition of unconditional branch!"
) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3072, __PRETTY_FUNCTION__))
;
3073 Op<-3>() = V;
3074 }
3075
3076 unsigned getNumSuccessors() const { return 1+isConditional(); }
3077
3078 BasicBlock *getSuccessor(unsigned i) const {
3079 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")((i < getNumSuccessors() && "Successor # out of range for Branch!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3079, __PRETTY_FUNCTION__))
;
3080 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3081 }
3082
3083 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3084 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")((idx < getNumSuccessors() && "Successor # out of range for Branch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3084, __PRETTY_FUNCTION__))
;
3085 *(&Op<-1>() - idx) = NewSucc;
3086 }
3087
3088 /// Swap the successors of this branch instruction.
3089 ///
3090 /// Swaps the successors of the branch instruction. This also swaps any
3091 /// branch weight metadata associated with the instruction so that it
3092 /// continues to map correctly to each operand.
3093 void swapSuccessors();
3094
3095 iterator_range<succ_op_iterator> successors() {
3096 return make_range(
3097 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3098 succ_op_iterator(value_op_end()));
3099 }
3100
3101 iterator_range<const_succ_op_iterator> successors() const {
3102 return make_range(const_succ_op_iterator(
3103 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3104 const_succ_op_iterator(value_op_end()));
3105 }
3106
3107 // Methods for support type inquiry through isa, cast, and dyn_cast:
3108 static bool classof(const Instruction *I) {
3109 return (I->getOpcode() == Instruction::Br);
3110 }
3111 static bool classof(const Value *V) {
3112 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3113 }
3114};
3115
3116template <>
3117struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3118};
3119
3120DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<BranchInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3120, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<BranchInst>::op_begin(const_cast<BranchInst
*>(this))[i_nocapture].get()); } void BranchInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<BranchInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3120, __PRETTY_FUNCTION__)); OperandTraits<BranchInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned BranchInst
::getNumOperands() const { return OperandTraits<BranchInst
>::operands(this); } template <int Idx_nocapture> Use
&BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3121
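
A sketch of the BranchInst::Create overloads above (hypothetical; Then, Else and Cond are caller-supplied, and Cond may be null for an unconditional branch):

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Terminate BB with 'br Cond, Then, Else' when a condition is given,
    // otherwise with an unconditional 'br Then'.
    static BranchInst *TerminateWithBranch(BasicBlock *BB, BasicBlock *Then,
                                           BasicBlock *Else, Value *Cond) {
      if (Cond)
        return BranchInst::Create(Then, Else, Cond, BB);  // conditional
      return BranchInst::Create(Then, BB);                // unconditional
    }
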
3122//===----------------------------------------------------------------------===//
3123// SwitchInst Class
3124//===----------------------------------------------------------------------===//
3125
3126//===---------------------------------------------------------------------------
3127/// Multiway switch
3128///
3129class SwitchInst : public Instruction {
3130 unsigned ReservedSpace;
3131
3132 // Operand[0] = Value to switch on
3133 // Operand[1] = Default basic block destination
3134 // Operand[2n ] = Value to match
3135 // Operand[2n+1] = BasicBlock to go to on match
3136 SwitchInst(const SwitchInst &SI);
3137
3138 /// Create a new switch instruction, specifying a value to switch on and a
3139 /// default destination. The number of additional cases can be specified here
3140 /// to make memory allocation more efficient. This constructor can also
3141 /// auto-insert before another instruction.
3142 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3143 Instruction *InsertBefore);
3144
3145 /// Create a new switch instruction, specifying a value to switch on and a
3146 /// default destination. The number of additional cases can be specified here
3147 /// to make memory allocation more efficient. This constructor also
3148 /// auto-inserts at the end of the specified BasicBlock.
3149 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3150 BasicBlock *InsertAtEnd);
3151
3152 // allocate space for exactly zero operands
3153 void *operator new(size_t s) {
3154 return User::operator new(s);
3155 }
3156
3157 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3158 void growOperands();
3159
3160protected:
3161 // Note: Instruction needs to be a friend here to call cloneImpl.
3162 friend class Instruction;
3163
3164 SwitchInst *cloneImpl() const;
3165
3166public:
3167 // -2
3168 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3169
3170 template <typename CaseHandleT> class CaseIteratorImpl;
3171
3172 /// A handle to a particular switch case. It exposes a convenient interface
3173 /// to both the case value and the successor block.
3174 ///
3175 /// We define this as a template and instantiate it to form both a const and
3176 /// non-const handle.
3177 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3178 class CaseHandleImpl {
3179 // Directly befriend both const and non-const iterators.
3180 friend class SwitchInst::CaseIteratorImpl<
3181 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3182
3183 protected:
3184 // Expose the switch type we're parameterized with to the iterator.
3185 using SwitchInstType = SwitchInstT;
3186
3187 SwitchInstT *SI;
3188 ptrdiff_t Index;
3189
3190 CaseHandleImpl() = default;
3191 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3192
3193 public:
3194 /// Resolves case value for current case.
3195 ConstantIntT *getCaseValue() const {
3196 assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3197, __PRETTY_FUNCTION__))
3197 "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3197, __PRETTY_FUNCTION__))
;
3198 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3199 }
3200
3201 /// Resolves successor for current case.
3202 BasicBlockT *getCaseSuccessor() const {
3203 assert(((unsigned)Index < SI->getNumCases() ||((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3205, __PRETTY_FUNCTION__))
3204 (unsigned)Index == DefaultPseudoIndex) &&((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3205, __PRETTY_FUNCTION__))
3205 "Index out the number of cases.")((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3205, __PRETTY_FUNCTION__))
;
3206 return SI->getSuccessor(getSuccessorIndex());
3207 }
3208
3209 /// Returns number of current case.
3210 unsigned getCaseIndex() const { return Index; }
3211
3212 /// Returns successor index for current case successor.
3213 unsigned getSuccessorIndex() const {
3214 assert(((unsigned)Index == DefaultPseudoIndex ||((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3216, __PRETTY_FUNCTION__))
3215 (unsigned)Index < SI->getNumCases()) &&((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3216, __PRETTY_FUNCTION__))
3216 "Index out the number of cases.")((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3216, __PRETTY_FUNCTION__))
;
3217 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3218 }
3219
3220 bool operator==(const CaseHandleImpl &RHS) const {
3221 assert(SI == RHS.SI && "Incompatible operators.")((SI == RHS.SI && "Incompatible operators.") ? static_cast
<void> (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3221, __PRETTY_FUNCTION__))
;
3222 return Index == RHS.Index;
3223 }
3224 };
3225
3226 using ConstCaseHandle =
3227 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3228
3229 class CaseHandle
3230 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3231 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3232
3233 public:
3234 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3235
3236 /// Sets the new value for current case.
3237 void setValue(ConstantInt *V) {
3238 assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3239, __PRETTY_FUNCTION__))
3239 "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3239, __PRETTY_FUNCTION__))
;
3240 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3241 }
3242
3243 /// Sets the new successor for current case.
3244 void setSuccessor(BasicBlock *S) {
3245 SI->setSuccessor(getSuccessorIndex(), S);
3246 }
3247 };
3248
3249 template <typename CaseHandleT>
3250 class CaseIteratorImpl
3251 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3252 std::random_access_iterator_tag,
3253 CaseHandleT> {
3254 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3255
3256 CaseHandleT Case;
3257
3258 public:
3259 /// Default constructed iterator is in an invalid state until assigned to
3260 /// a case for a particular switch.
3261 CaseIteratorImpl() = default;
3262
3263 /// Initializes case iterator for given SwitchInst and for given
3264 /// case number.
3265 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3266
3267 /// Initializes case iterator for given SwitchInst and for given
3268 /// successor index.
3269 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3270 unsigned SuccessorIndex) {
3271 assert(SuccessorIndex < SI->getNumSuccessors() &&((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3272, __PRETTY_FUNCTION__))
3272 "Successor index # out of range!")((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3272, __PRETTY_FUNCTION__))
;
3273 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3274 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3275 }
3276
3277 /// Support converting to the const variant. This will be a no-op for const
3278 /// variant.
3279 operator CaseIteratorImpl<ConstCaseHandle>() const {
3280 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3281 }
3282
3283 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3284 // Check index correctness after addition.
3285 // Note: Index == getNumCases() means end().
3286 assert(Case.Index + N >= 0 &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3288, __PRETTY_FUNCTION__))
3287 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3288, __PRETTY_FUNCTION__))
3288 "Case.Index out the number of cases.")((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 3288, __PRETTY_FUNCTION__))
;
3289 Case.Index += N;
3290 return *this;
3291 }
3292 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3293 // Check index correctness after subtraction.
3294 // Note: Case.Index == getNumCases() means end().
3295    assert(Case.Index - N >= 0 &&
3296           (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3297           "Case.Index out the number of cases.");
3298 Case.Index -= N;
3299 return *this;
3300 }
3301 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3302    assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3303 return Case.Index - RHS.Case.Index;
3304 }
3305 bool operator==(const CaseIteratorImpl &RHS) const {
3306 return Case == RHS.Case;
3307 }
3308 bool operator<(const CaseIteratorImpl &RHS) const {
3309    assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3310 return Case.Index < RHS.Case.Index;
3311 }
3312 CaseHandleT &operator*() { return Case; }
3313 const CaseHandleT &operator*() const { return Case; }
3314 };
3315
3316 using CaseIt = CaseIteratorImpl<CaseHandle>;
3317 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3318
3319 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3320 unsigned NumCases,
3321 Instruction *InsertBefore = nullptr) {
3322 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3323 }
3324
3325 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3326 unsigned NumCases, BasicBlock *InsertAtEnd) {
3327 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3328 }
3329
3330 /// Provide fast operand accessors
3331  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3332
3333 // Accessor Methods for Switch stmt
3334 Value *getCondition() const { return getOperand(0); }
3335 void setCondition(Value *V) { setOperand(0, V); }
3336
3337 BasicBlock *getDefaultDest() const {
3338 return cast<BasicBlock>(getOperand(1));
3339 }
3340
3341 void setDefaultDest(BasicBlock *DefaultCase) {
3342 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3343 }
3344
3345 /// Return the number of 'cases' in this switch instruction, excluding the
3346 /// default case.
3347 unsigned getNumCases() const {
3348 return getNumOperands()/2 - 1;
3349 }
3350
3351 /// Returns a read/write iterator that points to the first case in the
3352 /// SwitchInst.
3353 CaseIt case_begin() {
3354 return CaseIt(this, 0);
3355 }
3356
3357 /// Returns a read-only iterator that points to the first case in the
3358 /// SwitchInst.
3359 ConstCaseIt case_begin() const {
3360 return ConstCaseIt(this, 0);
3361 }
3362
3363 /// Returns a read/write iterator that points one past the last in the
3364 /// SwitchInst.
3365 CaseIt case_end() {
3366 return CaseIt(this, getNumCases());
3367 }
3368
3369 /// Returns a read-only iterator that points one past the last in the
3370 /// SwitchInst.
3371 ConstCaseIt case_end() const {
3372 return ConstCaseIt(this, getNumCases());
3373 }
3374
3375 /// Iteration adapter for range-for loops.
3376 iterator_range<CaseIt> cases() {
3377 return make_range(case_begin(), case_end());
3378 }
3379
3380 /// Constant iteration adapter for range-for loops.
3381 iterator_range<ConstCaseIt> cases() const {
3382 return make_range(case_begin(), case_end());
3383 }
3384
3385 /// Returns an iterator that points to the default case.
3386  /// Note: this iterator can only resolve the successor. Attempting to
3387  /// resolve the case value triggers an assertion.
3388  /// Also note that incrementing or decrementing this iterator triggers an
3389  /// assertion and leaves it invalid.
3390 CaseIt case_default() {
3391 return CaseIt(this, DefaultPseudoIndex);
3392 }
3393 ConstCaseIt case_default() const {
3394 return ConstCaseIt(this, DefaultPseudoIndex);
3395 }
3396
3397 /// Search all of the case values for the specified constant. If it is
3398 /// explicitly handled, return the case iterator of it, otherwise return
3399 /// default case iterator to indicate that it is handled by the default
3400 /// handler.
3401 CaseIt findCaseValue(const ConstantInt *C) {
3402 CaseIt I = llvm::find_if(
3403 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3404 if (I != case_end())
3405 return I;
3406
3407 return case_default();
3408 }
3409 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3410 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3411 return Case.getCaseValue() == C;
3412 });
3413 if (I != case_end())
3414 return I;
3415
3416 return case_default();
3417 }
3418
3419 /// Finds the unique case value for a given successor. Returns null if the
3420 /// successor is not found, not unique, or is the default case.
3421 ConstantInt *findCaseDest(BasicBlock *BB) {
3422 if (BB == getDefaultDest())
3423 return nullptr;
3424
3425 ConstantInt *CI = nullptr;
3426 for (auto Case : cases()) {
3427 if (Case.getCaseSuccessor() != BB)
3428 continue;
3429
3430 if (CI)
3431 return nullptr; // Multiple cases lead to BB.
3432
3433 CI = Case.getCaseValue();
3434 }
3435
3436 return CI;
3437 }
3438
3439 /// Add an entry to the switch instruction.
3440 /// Note:
3441 /// This action invalidates case_end(). Old case_end() iterator will
3442 /// point to the added case.
3443 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3444
3445 /// This method removes the specified case and its successor from the switch
3446 /// instruction. Note that this operation may reorder the remaining cases at
3447 /// index idx and above.
3448 /// Note:
3449 /// This action invalidates iterators for all cases following the one removed,
3450 /// including the case_end() iterator. It returns an iterator for the next
3451 /// case.
3452 CaseIt removeCase(CaseIt I);
3453
3454 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3455 BasicBlock *getSuccessor(unsigned idx) const {
3456    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3457 return cast<BasicBlock>(getOperand(idx*2+1));
3458 }
3459 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3460    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3461 setOperand(idx * 2 + 1, NewSucc);
3462 }
3463
3464 // Methods for support type inquiry through isa, cast, and dyn_cast:
3465 static bool classof(const Instruction *I) {
3466 return I->getOpcode() == Instruction::Switch;
3467 }
3468 static bool classof(const Value *V) {
3469 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3470 }
3471};
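// --- Editor's illustration (not part of the annotated Instructions.h source) ---
// A minimal sketch of typical SwitchInst usage, assuming `using namespace llvm;`
// and that Cond, DefaultBB and CaseBB already exist (hypothetical names). The
// switch reserves room for one explicit case; the default destination is always
// present as operand 1.
static SwitchInst *buildExampleSwitch(LLVMContext &Ctx, Value *Cond,
                                      BasicBlock *DefaultBB, BasicBlock *CaseBB) {
  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1);
  SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 1), CaseBB);
  // cases() visits only the explicit cases; findCaseValue() falls back to
  // case_default() when a constant is not explicitly handled.
  for (auto Case : SI->cases())
    (void)Case.getCaseSuccessor();
  return SI;
}
// ------------------------------------------------------------------------------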
3472
3473/// A wrapper class to simplify modification of SwitchInst cases along with
3474/// their prof branch_weights metadata.
3475class SwitchInstProfUpdateWrapper {
3476 SwitchInst &SI;
3477 Optional<SmallVector<uint32_t, 8> > Weights = None;
3478 bool Changed = false;
3479
3480protected:
3481 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3482
3483 MDNode *buildProfBranchWeightsMD();
3484
3485 void init();
3486
3487public:
3488 using CaseWeightOpt = Optional<uint32_t>;
3489 SwitchInst *operator->() { return &SI; }
3490 SwitchInst &operator*() { return SI; }
3491 operator SwitchInst *() { return &SI; }
3492
3493 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3494
3495 ~SwitchInstProfUpdateWrapper() {
3496 if (Changed)
3497 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3498 }
3499
3500 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3501 /// correspondent branch weight.
3502 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3503
3504 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3505 /// specified branch weight for the added case.
3506 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3507
3508 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3509 /// this object to not touch the underlying SwitchInst in destructor.
3510 SymbolTableList<Instruction>::iterator eraseFromParent();
3511
3512 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3513 CaseWeightOpt getSuccessorWeight(unsigned idx);
3514
3515 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3516};
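// --- Editor's illustration (not part of the annotated source) ---
// A sketch of the profile-aware wrapper: removing a case through the wrapper
// keeps the !prof branch_weights metadata consistent; the metadata is rewritten
// when the wrapper is destroyed. `SI` is assumed to be an existing SwitchInst.
static void dropFirstCaseKeepingWeights(SwitchInst &SI) {
  SwitchInstProfUpdateWrapper SIW(SI);
  if (SIW->getNumCases() != 0)
    SIW.removeCase(SIW->case_begin()); // also drops the matching branch weight
}
// -----------------------------------------------------------------------------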
3517
3518template <>
3519struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3520};
3521
3522  DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3523
3524//===----------------------------------------------------------------------===//
3525// IndirectBrInst Class
3526//===----------------------------------------------------------------------===//
3527
3528//===---------------------------------------------------------------------------
3529/// Indirect Branch Instruction.
3530///
3531class IndirectBrInst : public Instruction {
3532 unsigned ReservedSpace;
3533
3534 // Operand[0] = Address to jump to
3535 // Operand[n+1] = n-th destination
3536 IndirectBrInst(const IndirectBrInst &IBI);
3537
3538 /// Create a new indirectbr instruction, specifying an
3539 /// Address to jump to. The number of expected destinations can be specified
3540 /// here to make memory allocation more efficient. This constructor can also
3541 /// autoinsert before another instruction.
3542 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3543
3544 /// Create a new indirectbr instruction, specifying an
3545 /// Address to jump to. The number of expected destinations can be specified
3546 /// here to make memory allocation more efficient. This constructor also
3547 /// autoinserts at the end of the specified BasicBlock.
3548 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3549
3550 // allocate space for exactly zero operands
3551 void *operator new(size_t s) {
3552 return User::operator new(s);
3553 }
3554
3555 void init(Value *Address, unsigned NumDests);
3556 void growOperands();
3557
3558protected:
3559 // Note: Instruction needs to be a friend here to call cloneImpl.
3560 friend class Instruction;
3561
3562 IndirectBrInst *cloneImpl() const;
3563
3564public:
3565 /// Iterator type that casts an operand to a basic block.
3566 ///
3567 /// This only makes sense because the successors are stored as adjacent
3568 /// operands for indirectbr instructions.
3569 struct succ_op_iterator
3570 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3571 std::random_access_iterator_tag, BasicBlock *,
3572 ptrdiff_t, BasicBlock *, BasicBlock *> {
3573 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3574
3575 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3576 BasicBlock *operator->() const { return operator*(); }
3577 };
3578
3579 /// The const version of `succ_op_iterator`.
3580 struct const_succ_op_iterator
3581 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3582 std::random_access_iterator_tag,
3583 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3584 const BasicBlock *> {
3585 explicit const_succ_op_iterator(const_value_op_iterator I)
3586 : iterator_adaptor_base(I) {}
3587
3588 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3589 const BasicBlock *operator->() const { return operator*(); }
3590 };
3591
3592 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3593 Instruction *InsertBefore = nullptr) {
3594 return new IndirectBrInst(Address, NumDests, InsertBefore);
3595 }
3596
3597 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3598 BasicBlock *InsertAtEnd) {
3599 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3600 }
3601
3602 /// Provide fast operand accessors.
3603    DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3604
3605 // Accessor Methods for IndirectBrInst instruction.
3606 Value *getAddress() { return getOperand(0); }
3607 const Value *getAddress() const { return getOperand(0); }
3608 void setAddress(Value *V) { setOperand(0, V); }
3609
3610 /// return the number of possible destinations in this
3611 /// indirectbr instruction.
3612 unsigned getNumDestinations() const { return getNumOperands()-1; }
3613
3614 /// Return the specified destination.
3615 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3616 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3617
3618 /// Add a destination.
3619 ///
3620 void addDestination(BasicBlock *Dest);
3621
3622 /// This method removes the specified successor from the
3623 /// indirectbr instruction.
3624 void removeDestination(unsigned i);
3625
3626 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3627 BasicBlock *getSuccessor(unsigned i) const {
3628 return cast<BasicBlock>(getOperand(i+1));
3629 }
3630 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3631 setOperand(i + 1, NewSucc);
3632 }
3633
3634 iterator_range<succ_op_iterator> successors() {
3635 return make_range(succ_op_iterator(std::next(value_op_begin())),
3636 succ_op_iterator(value_op_end()));
3637 }
3638
3639 iterator_range<const_succ_op_iterator> successors() const {
3640 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3641 const_succ_op_iterator(value_op_end()));
3642 }
3643
3644 // Methods for support type inquiry through isa, cast, and dyn_cast:
3645 static bool classof(const Instruction *I) {
3646 return I->getOpcode() == Instruction::IndirectBr;
3647 }
3648 static bool classof(const Value *V) {
3649 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3650 }
3651};
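// --- Editor's illustration (not part of the annotated source) ---
// A sketch of IndirectBrInst construction, assuming `Address` is a
// blockaddress-typed value and `Dest` an existing block (hypothetical names).
static IndirectBrInst *buildExampleIndirectBr(Value *Address, BasicBlock *Dest) {
  IndirectBrInst *IBI = IndirectBrInst::Create(Address, /*NumDests=*/1);
  IBI->addDestination(Dest); // operand 0 is the address, operands 1.. the dests
  return IBI;
}
// -----------------------------------------------------------------------------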
3652
3653template <>
3654struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3655};
3656
3657  DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3658
3659//===----------------------------------------------------------------------===//
3660// InvokeInst Class
3661//===----------------------------------------------------------------------===//
3662
3663/// Invoke instruction. The SubclassData field is used to hold the
3664/// calling convention of the call.
3665///
3666class InvokeInst : public CallBase {
3667 /// The number of operands for this call beyond the called function,
3668 /// arguments, and operand bundles.
3669 static constexpr int NumExtraOperands = 2;
3670
3671 /// The index from the end of the operand array to the normal destination.
3672 static constexpr int NormalDestOpEndIdx = -3;
3673
3674 /// The index from the end of the operand array to the unwind destination.
3675 static constexpr int UnwindDestOpEndIdx = -2;
3676
3677 InvokeInst(const InvokeInst &BI);
3678
3679 /// Construct an InvokeInst given a range of arguments.
3680 ///
3681 /// Construct an InvokeInst from a range of arguments
3682 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3683 BasicBlock *IfException, ArrayRef<Value *> Args,
3684 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3685 const Twine &NameStr, Instruction *InsertBefore);
3686
3687 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3688 BasicBlock *IfException, ArrayRef<Value *> Args,
3689 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3690 const Twine &NameStr, BasicBlock *InsertAtEnd);
3691
3692 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3693 BasicBlock *IfException, ArrayRef<Value *> Args,
3694 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3695
3696 /// Compute the number of operands to allocate.
3697 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3698 // We need one operand for the called function, plus our extra operands and
3699 // the input operand counts provided.
3700 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3701 }
3702
3703protected:
3704 // Note: Instruction needs to be a friend here to call cloneImpl.
3705 friend class Instruction;
3706
3707 InvokeInst *cloneImpl() const;
3708
3709public:
3710 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3711 BasicBlock *IfException, ArrayRef<Value *> Args,
3712 const Twine &NameStr,
3713 Instruction *InsertBefore = nullptr) {
3714 int NumOperands = ComputeNumOperands(Args.size());
3715 return new (NumOperands)
3716 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3717 NameStr, InsertBefore);
3718 }
3719
3720 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3721 BasicBlock *IfException, ArrayRef<Value *> Args,
3722 ArrayRef<OperandBundleDef> Bundles = None,
3723 const Twine &NameStr = "",
3724 Instruction *InsertBefore = nullptr) {
3725 int NumOperands =
3726 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3727 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3728
3729 return new (NumOperands, DescriptorBytes)
3730 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3731 NameStr, InsertBefore);
3732 }
3733
3734 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3735 BasicBlock *IfException, ArrayRef<Value *> Args,
3736 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3737 int NumOperands = ComputeNumOperands(Args.size());
3738 return new (NumOperands)
3739 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3740 NameStr, InsertAtEnd);
3741 }
3742
3743 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3744 BasicBlock *IfException, ArrayRef<Value *> Args,
3745 ArrayRef<OperandBundleDef> Bundles,
3746 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3747 int NumOperands =
3748 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3749 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3750
3751 return new (NumOperands, DescriptorBytes)
3752 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3753 NameStr, InsertAtEnd);
3754 }
3755
3756 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3757 BasicBlock *IfException, ArrayRef<Value *> Args,
3758 const Twine &NameStr,
3759 Instruction *InsertBefore = nullptr) {
3760 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3761 IfException, Args, None, NameStr, InsertBefore);
3762 }
3763
3764 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3765 BasicBlock *IfException, ArrayRef<Value *> Args,
3766 ArrayRef<OperandBundleDef> Bundles = None,
3767 const Twine &NameStr = "",
3768 Instruction *InsertBefore = nullptr) {
3769 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3770 IfException, Args, Bundles, NameStr, InsertBefore);
3771 }
3772
3773 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3774 BasicBlock *IfException, ArrayRef<Value *> Args,
3775 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3776 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3777 IfException, Args, NameStr, InsertAtEnd);
3778 }
3779
3780 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3781 BasicBlock *IfException, ArrayRef<Value *> Args,
3782 ArrayRef<OperandBundleDef> Bundles,
3783 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3784 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3785 IfException, Args, Bundles, NameStr, InsertAtEnd);
3786 }
3787
3788 // Deprecated [opaque pointer types]
3789 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3790 BasicBlock *IfException, ArrayRef<Value *> Args,
3791 const Twine &NameStr,
3792 Instruction *InsertBefore = nullptr) {
3793 return Create(cast<FunctionType>(
3794 cast<PointerType>(Func->getType())->getElementType()),
3795 Func, IfNormal, IfException, Args, None, NameStr,
3796 InsertBefore);
3797 }
3798
3799 // Deprecated [opaque pointer types]
3800 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3801 BasicBlock *IfException, ArrayRef<Value *> Args,
3802 ArrayRef<OperandBundleDef> Bundles = None,
3803 const Twine &NameStr = "",
3804 Instruction *InsertBefore = nullptr) {
3805 return Create(cast<FunctionType>(
3806 cast<PointerType>(Func->getType())->getElementType()),
3807 Func, IfNormal, IfException, Args, Bundles, NameStr,
3808 InsertBefore);
3809 }
3810
3811 // Deprecated [opaque pointer types]
3812 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3813 BasicBlock *IfException, ArrayRef<Value *> Args,
3814 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3815 return Create(cast<FunctionType>(
3816 cast<PointerType>(Func->getType())->getElementType()),
3817 Func, IfNormal, IfException, Args, NameStr, InsertAtEnd);
3818 }
3819
3820 // Deprecated [opaque pointer types]
3821 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3822 BasicBlock *IfException, ArrayRef<Value *> Args,
3823 ArrayRef<OperandBundleDef> Bundles,
3824 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3825 return Create(cast<FunctionType>(
3826 cast<PointerType>(Func->getType())->getElementType()),
3827 Func, IfNormal, IfException, Args, Bundles, NameStr,
3828 InsertAtEnd);
3829 }
3830
3831 /// Create a clone of \p II with a different set of operand bundles and
3832 /// insert it before \p InsertPt.
3833 ///
3834 /// The returned invoke instruction is identical to \p II in every way except
3835 /// that the operand bundles for the new instruction are set to the operand
3836 /// bundles in \p Bundles.
3837 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3838 Instruction *InsertPt = nullptr);
3839
3840 /// Determine if the call should not perform indirect branch tracking.
3841 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
3842
3843 /// Determine if the call cannot unwind.
3844 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
3845 void setDoesNotThrow() {
3846 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
3847 }
3848
3849 // get*Dest - Return the destination basic blocks...
3850 BasicBlock *getNormalDest() const {
3851 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3852 }
3853 BasicBlock *getUnwindDest() const {
3854 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3855 }
3856 void setNormalDest(BasicBlock *B) {
3857 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3858 }
3859 void setUnwindDest(BasicBlock *B) {
3860 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3861 }
3862
3863 /// Get the landingpad instruction from the landing pad
3864 /// block (the unwind destination).
3865 LandingPadInst *getLandingPadInst() const;
3866
3867 BasicBlock *getSuccessor(unsigned i) const {
3868    assert(i < 2 && "Successor # out of range for invoke!");
3869 return i == 0 ? getNormalDest() : getUnwindDest();
3870 }
3871
3872 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3873    assert(i < 2 && "Successor # out of range for invoke!");
3874 if (i == 0)
3875 setNormalDest(NewSucc);
3876 else
3877 setUnwindDest(NewSucc);
3878 }
3879
3880 unsigned getNumSuccessors() const { return 2; }
3881
3882 // Methods for support type inquiry through isa, cast, and dyn_cast:
3883 static bool classof(const Instruction *I) {
3884 return (I->getOpcode() == Instruction::Invoke);
3885 }
3886 static bool classof(const Value *V) {
3887 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3888 }
3889
3890private:
3891
3892 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3893 // method so that subclasses cannot accidentally use it.
3894 void setInstructionSubclassData(unsigned short D) {
3895 Instruction::setInstructionSubclassData(D);
3896 }
3897};
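// --- Editor's illustration (not part of the annotated source) ---
// A sketch of InvokeInst creation via FunctionCallee, assuming Callee, NormalBB,
// UnwindBB and Args already exist (hypothetical names). The normal and unwind
// destinations are stored as the trailing extra operands. The name is left
// empty because the callee's return type is unknown in this sketch.
static InvokeInst *buildExampleInvoke(FunctionCallee Callee,
                                      BasicBlock *NormalBB, BasicBlock *UnwindBB,
                                      ArrayRef<Value *> Args) {
  return InvokeInst::Create(Callee, NormalBB, UnwindBB, Args, "");
}
// -----------------------------------------------------------------------------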
3898
3899InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3900 BasicBlock *IfException, ArrayRef<Value *> Args,
3901 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3902 const Twine &NameStr, Instruction *InsertBefore)
3903 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3904 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3905 InsertBefore) {
3906 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3907}
3908
3909InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3910 BasicBlock *IfException, ArrayRef<Value *> Args,
3911 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3912 const Twine &NameStr, BasicBlock *InsertAtEnd)
3913 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3914 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3915 InsertAtEnd) {
3916 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3917}
3918
3919//===----------------------------------------------------------------------===//
3920// CallBrInst Class
3921//===----------------------------------------------------------------------===//
3922
3923/// CallBr instruction, tracking function calls that may not return control but
3924/// instead transfer it to a third location. The SubclassData field is used to
3925/// hold the calling convention of the call.
3926///
3927class CallBrInst : public CallBase {
3928
3929 unsigned NumIndirectDests;
3930
3931 CallBrInst(const CallBrInst &BI);
3932
3933 /// Construct a CallBrInst given a range of arguments.
3934 ///
3935 /// Construct a CallBrInst from a range of arguments
3936 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3937 ArrayRef<BasicBlock *> IndirectDests,
3938 ArrayRef<Value *> Args,
3939 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3940 const Twine &NameStr, Instruction *InsertBefore);
3941
3942 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3943 ArrayRef<BasicBlock *> IndirectDests,
3944 ArrayRef<Value *> Args,
3945 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3946 const Twine &NameStr, BasicBlock *InsertAtEnd);
3947
3948 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3949 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3950 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3951
3952 /// Should the Indirect Destinations change, scan + update the Arg list.
3953 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3954
3955 /// Compute the number of operands to allocate.
3956 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3957 int NumBundleInputs = 0) {
3958 // We need one operand for the called function, plus our extra operands and
3959 // the input operand counts provided.
3960 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3961 }
3962
3963protected:
3964 // Note: Instruction needs to be a friend here to call cloneImpl.
3965 friend class Instruction;
3966
3967 CallBrInst *cloneImpl() const;
3968
3969public:
3970 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3971 BasicBlock *DefaultDest,
3972 ArrayRef<BasicBlock *> IndirectDests,
3973 ArrayRef<Value *> Args, const Twine &NameStr,
3974 Instruction *InsertBefore = nullptr) {
3975 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3976 return new (NumOperands)
3977 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3978 NumOperands, NameStr, InsertBefore);
3979 }
3980
3981 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3982 BasicBlock *DefaultDest,
3983 ArrayRef<BasicBlock *> IndirectDests,
3984 ArrayRef<Value *> Args,
3985 ArrayRef<OperandBundleDef> Bundles = None,
3986 const Twine &NameStr = "",
3987 Instruction *InsertBefore = nullptr) {
3988 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3989 CountBundleInputs(Bundles));
3990 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3991
3992 return new (NumOperands, DescriptorBytes)
3993 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3994 NumOperands, NameStr, InsertBefore);
3995 }
3996
3997 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3998 BasicBlock *DefaultDest,
3999 ArrayRef<BasicBlock *> IndirectDests,
4000 ArrayRef<Value *> Args, const Twine &NameStr,
4001 BasicBlock *InsertAtEnd) {
4002 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4003 return new (NumOperands)
4004 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4005 NumOperands, NameStr, InsertAtEnd);
4006 }
4007
4008 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4009 BasicBlock *DefaultDest,
4010 ArrayRef<BasicBlock *> IndirectDests,
4011 ArrayRef<Value *> Args,
4012 ArrayRef<OperandBundleDef> Bundles,
4013 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4014 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4015 CountBundleInputs(Bundles));
4016 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4017
4018 return new (NumOperands, DescriptorBytes)
4019 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4020 NumOperands, NameStr, InsertAtEnd);
4021 }
4022
4023 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4024 ArrayRef<BasicBlock *> IndirectDests,
4025 ArrayRef<Value *> Args, const Twine &NameStr,
4026 Instruction *InsertBefore = nullptr) {
4027 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4028 IndirectDests, Args, NameStr, InsertBefore);
4029 }
4030
4031 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4032 ArrayRef<BasicBlock *> IndirectDests,
4033 ArrayRef<Value *> Args,
4034 ArrayRef<OperandBundleDef> Bundles = None,
4035 const Twine &NameStr = "",
4036 Instruction *InsertBefore = nullptr) {
4037 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4038 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4039 }
4040
4041 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4042 ArrayRef<BasicBlock *> IndirectDests,
4043 ArrayRef<Value *> Args, const Twine &NameStr,
4044 BasicBlock *InsertAtEnd) {
4045 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4046 IndirectDests, Args, NameStr, InsertAtEnd);
4047 }
4048
4049 static CallBrInst *Create(FunctionCallee Func,
4050 BasicBlock *DefaultDest,
4051 ArrayRef<BasicBlock *> IndirectDests,
4052 ArrayRef<Value *> Args,
4053 ArrayRef<OperandBundleDef> Bundles,
4054 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4055 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4056 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4057 }
4058
4059 /// Create a clone of \p CBI with a different set of operand bundles and
4060 /// insert it before \p InsertPt.
4061 ///
4062 /// The returned callbr instruction is identical to \p CBI in every way
4063 /// except that the operand bundles for the new instruction are set to the
4064 /// operand bundles in \p Bundles.
4065 static CallBrInst *Create(CallBrInst *CBI,
4066 ArrayRef<OperandBundleDef> Bundles,
4067 Instruction *InsertPt = nullptr);
4068
4069 /// Return the number of callbr indirect dest labels.
4070 ///
4071 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4072
4073 /// getIndirectDestLabel - Return the i-th indirect dest label.
4074 ///
4075 Value *getIndirectDestLabel(unsigned i) const {
4076    assert(i < getNumIndirectDests() && "Out of bounds!");
4077 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4078 1);
4079 }
4080
4081 Value *getIndirectDestLabelUse(unsigned i) const {
4082    assert(i < getNumIndirectDests() && "Out of bounds!");
4083 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4084 1);
4085 }
4086
4087 // Return the destination basic blocks...
4088 BasicBlock *getDefaultDest() const {
4089 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4090 }
4091 BasicBlock *getIndirectDest(unsigned i) const {
4092 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4093 }
4094 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4095 SmallVector<BasicBlock *, 16> IndirectDests;
4096 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4097 IndirectDests.push_back(getIndirectDest(i));
4098 return IndirectDests;
4099 }
4100 void setDefaultDest(BasicBlock *B) {
4101 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4102 }
4103 void setIndirectDest(unsigned i, BasicBlock *B) {
4104 updateArgBlockAddresses(i, B);
4105 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4106 }
4107
4108 BasicBlock *getSuccessor(unsigned i) const {
4109    assert(i < getNumSuccessors() + 1 &&
4110           "Successor # out of range for callbr!");
4111 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4112 }
4113
4114 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4115    assert(i < getNumIndirectDests() + 1 &&
4116           "Successor # out of range for callbr!");
4117 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4118 }
4119
4120 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4121
4122 // Methods for support type inquiry through isa, cast, and dyn_cast:
4123 static bool classof(const Instruction *I) {
4124 return (I->getOpcode() == Instruction::CallBr);
4125 }
4126 static bool classof(const Value *V) {
4127 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4128 }
4129
4130private:
4131
4132 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4133 // method so that subclasses cannot accidentally use it.
4134 void setInstructionSubclassData(unsigned short D) {
4135 Instruction::setInstructionSubclassData(D);
4136 }
4137};
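// --- Editor's illustration (not part of the annotated source) ---
// A sketch of CallBrInst creation, assuming Callee, Fallthrough, IndirectBB and
// Args already exist (hypothetical names): one default ("fallthrough")
// destination plus a list of indirect labels.
static CallBrInst *buildExampleCallBr(FunctionCallee Callee,
                                      BasicBlock *Fallthrough,
                                      BasicBlock *IndirectBB,
                                      ArrayRef<Value *> Args) {
  return CallBrInst::Create(Callee, Fallthrough, {IndirectBB}, Args, "");
}
// -----------------------------------------------------------------------------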
4138
4139CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4140 ArrayRef<BasicBlock *> IndirectDests,
4141 ArrayRef<Value *> Args,
4142 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4143 const Twine &NameStr, Instruction *InsertBefore)
4144 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4145 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4146 InsertBefore) {
4147 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4148}
4149
4150CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4151 ArrayRef<BasicBlock *> IndirectDests,
4152 ArrayRef<Value *> Args,
4153 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4154 const Twine &NameStr, BasicBlock *InsertAtEnd)
4155 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4156 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4157 InsertAtEnd) {
4158 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4159}
4160
4161//===----------------------------------------------------------------------===//
4162// ResumeInst Class
4163//===----------------------------------------------------------------------===//
4164
4165//===---------------------------------------------------------------------------
4166/// Resume the propagation of an exception.
4167///
4168class ResumeInst : public Instruction {
4169 ResumeInst(const ResumeInst &RI);
4170
4171 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4172 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4173
4174protected:
4175 // Note: Instruction needs to be a friend here to call cloneImpl.
4176 friend class Instruction;
4177
4178 ResumeInst *cloneImpl() const;
4179
4180public:
4181 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4182 return new(1) ResumeInst(Exn, InsertBefore);
4183 }
4184
4185 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4186 return new(1) ResumeInst(Exn, InsertAtEnd);
4187 }
4188
4189 /// Provide fast operand accessors
4190    DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4191
4192 /// Convenience accessor.
4193 Value *getValue() const { return Op<0>(); }
4194
4195 unsigned getNumSuccessors() const { return 0; }
4196
4197 // Methods for support type inquiry through isa, cast, and dyn_cast:
4198 static bool classof(const Instruction *I) {
4199 return I->getOpcode() == Instruction::Resume;
4200 }
4201 static bool classof(const Value *V) {
4202 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4203 }
4204
4205private:
4206 BasicBlock *getSuccessor(unsigned idx) const {
4207    llvm_unreachable("ResumeInst has no successors!");
4208 }
4209
4210 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4211    llvm_unreachable("ResumeInst has no successors!");
4212 }
4213};
4214
4215template <>
4216struct OperandTraits<ResumeInst> :
4217 public FixedNumOperandTraits<ResumeInst, 1> {
4218};
4219
4220  DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4221
4222//===----------------------------------------------------------------------===//
4223// CatchSwitchInst Class
4224//===----------------------------------------------------------------------===//
4225class CatchSwitchInst : public Instruction {
4226 /// The number of operands actually allocated. NumOperands is
4227 /// the number actually in use.
4228 unsigned ReservedSpace;
4229
4230 // Operand[0] = Outer scope
4231 // Operand[1] = Unwind block destination
4232 // Operand[n] = BasicBlock to go to on match
4233 CatchSwitchInst(const CatchSwitchInst &CSI);
4234
4235 /// Create a new switch instruction, specifying a
4236 /// default destination. The number of additional handlers can be specified
4237 /// here to make memory allocation more efficient.
4238 /// This constructor can also autoinsert before another instruction.
4239 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4240 unsigned NumHandlers, const Twine &NameStr,
4241 Instruction *InsertBefore);
4242
4243 /// Create a new switch instruction, specifying a
4244 /// default destination. The number of additional handlers can be specified
4245 /// here to make memory allocation more efficient.
4246 /// This constructor also autoinserts at the end of the specified BasicBlock.
4247 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4248 unsigned NumHandlers, const Twine &NameStr,
4249 BasicBlock *InsertAtEnd);
4250
4251 // allocate space for exactly zero operands
4252 void *operator new(size_t s) { return User::operator new(s); }
4253
4254 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4255 void growOperands(unsigned Size);
4256
4257protected:
4258 // Note: Instruction needs to be a friend here to call cloneImpl.
4259 friend class Instruction;
4260
4261 CatchSwitchInst *cloneImpl() const;
4262
4263public:
4264 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4265 unsigned NumHandlers,
4266 const Twine &NameStr = "",
4267 Instruction *InsertBefore = nullptr) {
4268 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4269 InsertBefore);
4270 }
4271
4272 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4273 unsigned NumHandlers, const Twine &NameStr,
4274 BasicBlock *InsertAtEnd) {
4275 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4276 InsertAtEnd);
4277 }
4278
4279 /// Provide fast operand accessors
4280    DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4281
4282 // Accessor Methods for CatchSwitch stmt
4283 Value *getParentPad() const { return getOperand(0); }
4284 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4285
4286 // Accessor Methods for CatchSwitch stmt
4287 bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
4288 bool unwindsToCaller() const { return !hasUnwindDest(); }
4289 BasicBlock *getUnwindDest() const {
4290 if (hasUnwindDest())
4291 return cast<BasicBlock>(getOperand(1));
4292 return nullptr;
4293 }
4294 void setUnwindDest(BasicBlock *UnwindDest) {
4295      assert(UnwindDest);
4296      assert(hasUnwindDest());
4297 setOperand(1, UnwindDest);
4298 }
4299
4300 /// return the number of 'handlers' in this catchswitch
4301 /// instruction, except the default handler
4302 unsigned getNumHandlers() const {
4303 if (hasUnwindDest())
4304 return getNumOperands() - 2;
4305 return getNumOperands() - 1;
4306 }
4307
4308private:
4309 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4310 static const BasicBlock *handler_helper(const Value *V) {
4311 return cast<BasicBlock>(V);
4312 }
4313
4314public:
4315 using DerefFnTy = BasicBlock *(*)(Value *);
4316 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4317 using handler_range = iterator_range<handler_iterator>;
4318 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4319 using const_handler_iterator =
4320 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4321 using const_handler_range = iterator_range<const_handler_iterator>;
4322
4323 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4324 handler_iterator handler_begin() {
4325 op_iterator It = op_begin() + 1;
4326 if (hasUnwindDest())
4327 ++It;
4328 return handler_iterator(It, DerefFnTy(handler_helper));
4329 }
4330
4331 /// Returns an iterator that points to the first handler in the
4332 /// CatchSwitchInst.
4333 const_handler_iterator handler_begin() const {
4334 const_op_iterator It = op_begin() + 1;
4335 if (hasUnwindDest())
4336 ++It;
4337 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4338 }
4339
4340 /// Returns a read-only iterator that points one past the last
4341 /// handler in the CatchSwitchInst.
4342 handler_iterator handler_end() {
4343 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4344 }
4345
4346 /// Returns an iterator that points one past the last handler in the
4347 /// CatchSwitchInst.
4348 const_handler_iterator handler_end() const {
4349 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4350 }
4351
4352 /// iteration adapter for range-for loops.
4353 handler_range handlers() {
4354 return make_range(handler_begin(), handler_end());
4355 }
4356
4357 /// iteration adapter for range-for loops.
4358 const_handler_range handlers() const {
4359 return make_range(handler_begin(), handler_end());
4360 }
4361
4362 /// Add an entry to the switch instruction...
4363 /// Note:
4364 /// This action invalidates handler_end(). Old handler_end() iterator will
4365 /// point to the added handler.
4366 void addHandler(BasicBlock *Dest);
4367
4368 void removeHandler(handler_iterator HI);
4369
4370 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4371 BasicBlock *getSuccessor(unsigned Idx) const {
4372      assert(Idx < getNumSuccessors() &&
4373             "Successor # out of range for catchswitch!");
4374 return cast<BasicBlock>(getOperand(Idx + 1));
4375 }
4376 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4377      assert(Idx < getNumSuccessors() &&
4378             "Successor # out of range for catchswitch!");
4379 setOperand(Idx + 1, NewSucc);
4380 }
4381
4382 // Methods for support type inquiry through isa, cast, and dyn_cast:
4383 static bool classof(const Instruction *I) {
4384 return I->getOpcode() == Instruction::CatchSwitch;
4385 }
4386 static bool classof(const Value *V) {
4387 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4388 }
4389};
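// --- Editor's illustration (not part of the annotated source) ---
// A sketch of CatchSwitchInst usage, assuming ParentPad, UnwindBB and HandlerBB
// already exist (hypothetical names). Handler blocks follow the parent-pad and
// optional unwind-destination operands.
static CatchSwitchInst *buildExampleCatchSwitch(Value *ParentPad,
                                                BasicBlock *UnwindBB,
                                                BasicBlock *HandlerBB) {
  CatchSwitchInst *CSI =
      CatchSwitchInst::Create(ParentPad, UnwindBB, /*NumHandlers=*/1, "cs");
  CSI->addHandler(HandlerBB); // handlers() iterates only the handler blocks
  return CSI;
}
// -----------------------------------------------------------------------------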
4390
4391template <>
4392struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4393
4394  DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4395
4396//===----------------------------------------------------------------------===//
4397// CleanupPadInst Class
4398//===----------------------------------------------------------------------===//
4399class CleanupPadInst : public FuncletPadInst {
4400private:
4401 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4402 unsigned Values, const Twine &NameStr,
4403 Instruction *InsertBefore)
4404 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4405 NameStr, InsertBefore) {}
4406 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4407 unsigned Values, const Twine &NameStr,
4408 BasicBlock *InsertAtEnd)
4409 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4410 NameStr, InsertAtEnd) {}
4411
4412public:
4413 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4414 const Twine &NameStr = "",
4415 Instruction *InsertBefore = nullptr) {
4416 unsigned Values = 1 + Args.size();
4417 return new (Values)
4418 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4419 }
4420
4421 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4422 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4423 unsigned Values = 1 + Args.size();
4424 return new (Values)
4425 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4426 }
4427
4428 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4429 static bool classof(const Instruction *I) {
4430 return I->getOpcode() == Instruction::CleanupPad;
4431 }
4432 static bool classof(const Value *V) {
4433 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4434 }
4435};
4436
4437//===----------------------------------------------------------------------===//
4438// CatchPadInst Class
4439//===----------------------------------------------------------------------===//
4440class CatchPadInst : public FuncletPadInst {
4441private:
4442 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4443 unsigned Values, const Twine &NameStr,
4444 Instruction *InsertBefore)
4445 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4446 NameStr, InsertBefore) {}
4447 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4448 unsigned Values, const Twine &NameStr,
4449 BasicBlock *InsertAtEnd)
4450 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4451 NameStr, InsertAtEnd) {}
4452
4453public:
4454 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4455 const Twine &NameStr = "",
4456 Instruction *InsertBefore = nullptr) {
4457 unsigned Values = 1 + Args.size();
4458 return new (Values)
4459 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4460 }
4461
4462 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4463 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4464 unsigned Values = 1 + Args.size();
4465 return new (Values)
4466 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4467 }
4468
4469 /// Convenience accessors
4470 CatchSwitchInst *getCatchSwitch() const {
4471 return cast<CatchSwitchInst>(Op<-1>());
4472 }
4473 void setCatchSwitch(Value *CatchSwitch) {
4474 assert(CatchSwitch);
4475 Op<-1>() = CatchSwitch;
4476 }
4477
4478 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4479 static bool classof(const Instruction *I) {
4480 return I->getOpcode() == Instruction::CatchPad;
4481 }
4482 static bool classof(const Value *V) {
4483 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4484 }
4485};
4486
4487//===----------------------------------------------------------------------===//
4488// CatchReturnInst Class
4489//===----------------------------------------------------------------------===//
4490
4491class CatchReturnInst : public Instruction {
4492 CatchReturnInst(const CatchReturnInst &RI);
4493 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4494 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4495
4496 void init(Value *CatchPad, BasicBlock *BB);
4497
4498protected:
4499 // Note: Instruction needs to be a friend here to call cloneImpl.
4500 friend class Instruction;
4501
4502 CatchReturnInst *cloneImpl() const;
4503
4504public:
4505 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4506 Instruction *InsertBefore = nullptr) {
4507 assert(CatchPad);
4508 assert(BB);
4509 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4510 }
4511
4512 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4513 BasicBlock *InsertAtEnd) {
4514 assert(CatchPad);
4515 assert(BB);
4516 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4517 }
4518
4519 /// Provide fast operand accessors
4520 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4521
4522 /// Convenience accessors.
4523 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4524 void setCatchPad(CatchPadInst *CatchPad) {
4525 assert(CatchPad);
4526 Op<0>() = CatchPad;
4527 }
4528
4529 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4530 void setSuccessor(BasicBlock *NewSucc) {
4531 assert(NewSucc);
4532 Op<1>() = NewSucc;
4533 }
4534 unsigned getNumSuccessors() const { return 1; }
4535
4536 /// Get the parentPad of this catchret's catchpad's catchswitch.
4537 /// The successor block is implicitly a member of this funclet.
4538 Value *getCatchSwitchParentPad() const {
4539 return getCatchPad()->getCatchSwitch()->getParentPad();
4540 }
4541
4542 // Methods for support type inquiry through isa, cast, and dyn_cast:
4543 static bool classof(const Instruction *I) {
4544 return (I->getOpcode() == Instruction::CatchRet);
4545 }
4546 static bool classof(const Value *V) {
4547 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4548 }
4549
4550private:
4551 BasicBlock *getSuccessor(unsigned Idx) const {
4552 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4553 return getSuccessor();
4554 }
4555
4556 void setSuccessor(unsigned Idx, BasicBlock *B) {
4557 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4558 setSuccessor(B);
4559 }
4560};
4561
4562template <>
4563struct OperandTraits<CatchReturnInst>
4564 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4565
4566 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4567
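A usage sketch (editor's illustration, not part of Instructions.h): the accessors above let a pass hop from a catchret to its catchpad, to the owning catchswitch, and to that funclet's parent pad. The helper below assumes it is handed an existing, well-formed CatchReturnInst.

  #include "llvm/IR/Instructions.h"

  static void inspectCatchRet(llvm::CatchReturnInst *CRI) {
    llvm::CatchPadInst *Pad = CRI->getCatchPad();        // operand 0
    llvm::CatchSwitchInst *CS = Pad->getCatchSwitch();   // owning catchswitch
    llvm::Value *ParentPad = CRI->getCatchSwitchParentPad();
    llvm::BasicBlock *Normal = CRI->getSuccessor();      // the single successor
    (void)CS; (void)ParentPad; (void)Normal;
  }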
4568//===----------------------------------------------------------------------===//
4569// CleanupReturnInst Class
4570//===----------------------------------------------------------------------===//
4571
4572class CleanupReturnInst : public Instruction {
4573private:
4574 CleanupReturnInst(const CleanupReturnInst &RI);
4575 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4576 Instruction *InsertBefore = nullptr);
4577 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4578 BasicBlock *InsertAtEnd);
4579
4580 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4581
4582protected:
4583 // Note: Instruction needs to be a friend here to call cloneImpl.
4584 friend class Instruction;
4585
4586 CleanupReturnInst *cloneImpl() const;
4587
4588public:
4589 static CleanupReturnInst *Create(Value *CleanupPad,
4590 BasicBlock *UnwindBB = nullptr,
4591 Instruction *InsertBefore = nullptr) {
4592 assert(CleanupPad);
4593 unsigned Values = 1;
4594 if (UnwindBB)
4595 ++Values;
4596 return new (Values)
4597 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4598 }
4599
4600 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4601 BasicBlock *InsertAtEnd) {
4602 assert(CleanupPad);
4603 unsigned Values = 1;
4604 if (UnwindBB)
4605 ++Values;
4606 return new (Values)
4607 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4608 }
4609
4610 /// Provide fast operand accessors
4611 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4612
4613 bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
4614 bool unwindsToCaller() const { return !hasUnwindDest(); }
4615
4616 /// Convenience accessor.
4617 CleanupPadInst *getCleanupPad() const {
4618 return cast<CleanupPadInst>(Op<0>());
4619 }
4620 void setCleanupPad(CleanupPadInst *CleanupPad) {
4621 assert(CleanupPad);
4622 Op<0>() = CleanupPad;
4623 }
4624
4625 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4626
4627 BasicBlock *getUnwindDest() const {
4628 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4629 }
4630 void setUnwindDest(BasicBlock *NewDest) {
4631 assert(NewDest);
4632 assert(hasUnwindDest());
4633 Op<1>() = NewDest;
4634 }
4635
4636 // Methods for support type inquiry through isa, cast, and dyn_cast:
4637 static bool classof(const Instruction *I) {
4638 return (I->getOpcode() == Instruction::CleanupRet);
4639 }
4640 static bool classof(const Value *V) {
4641 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4642 }
4643
4644private:
4645 BasicBlock *getSuccessor(unsigned Idx) const {
4646 assert(Idx == 0);
4647 return getUnwindDest();
4648 }
4649
4650 void setSuccessor(unsigned Idx, BasicBlock *B) {
4651 assert(Idx == 0);
4652 setUnwindDest(B);
4653 }
4654
4655 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4656 // method so that subclasses cannot accidentally use it.
4657 void setInstructionSubclassData(unsigned short D) {
4658 Instruction::setInstructionSubclassData(D);
4659 }
4660};
4661
4662template <>
4663struct OperandTraits<CleanupReturnInst>
4664 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4665
4666 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4667
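A hedged sketch of how the Create overloads above behave: with a null unwind destination the cleanupret unwinds to the caller and has no successors; with an explicit destination it carries a second operand and a single successor. The pads and blocks are assumed to already exist in a function that uses EH funclets; each cleanuppad gets its own cleanupret here to keep the example IR simple.

  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  static void emitCleanupRets(CleanupPadInst *PadA, BasicBlock *BlockA,
                              CleanupPadInst *PadB, BasicBlock *BlockB,
                              BasicBlock *UnwindBB) {
    // No unwind destination: one operand (the cleanuppad), zero successors.
    CleanupReturnInst *ToCaller =
        CleanupReturnInst::Create(PadA, /*UnwindBB=*/nullptr, BlockA);
    assert(ToCaller->unwindsToCaller() && ToCaller->getNumSuccessors() == 0);

    // Explicit unwind destination: two operands, one successor.
    CleanupReturnInst *ToBlock = CleanupReturnInst::Create(PadB, UnwindBB, BlockB);
    assert(ToBlock->hasUnwindDest() && ToBlock->getUnwindDest() == UnwindBB);
    (void)ToCaller; (void)ToBlock;
  }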
4668//===----------------------------------------------------------------------===//
4669// UnreachableInst Class
4670//===----------------------------------------------------------------------===//
4671
4672//===---------------------------------------------------------------------------
4673/// This instruction has undefined behavior. In particular, the
4674/// presence of this instruction indicates some higher level knowledge that the
4675/// end of the block cannot be reached.
4676///
4677class UnreachableInst : public Instruction {
4678protected:
4679 // Note: Instruction needs to be a friend here to call cloneImpl.
4680 friend class Instruction;
4681
4682 UnreachableInst *cloneImpl() const;
4683
4684public:
4685 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4686 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4687
4688 // allocate space for exactly zero operands
4689 void *operator new(size_t s) {
4690 return User::operator new(s, 0);
4691 }
4692
4693 unsigned getNumSuccessors() const { return 0; }
4694
4695 // Methods for support type inquiry through isa, cast, and dyn_cast:
4696 static bool classof(const Instruction *I) {
4697 return I->getOpcode() == Instruction::Unreachable;
4698 }
4699 static bool classof(const Value *V) {
4700 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4701 }
4702
4703private:
4704 BasicBlock *getSuccessor(unsigned idx) const {
4705 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 4705)
;
4706 }
4707
4708 void setSuccessor(unsigned idx, BasicBlock *B) {
4709 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/IR/Instructions.h"
, 4709)
;
4710 }
4711};
4712
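A typical use, shown only as an illustration: when a pass proves control cannot fall off the end of a block (for example after a call to a noreturn function), it caps the block with this instruction using the insert-at-end constructor declared above. 'BB' is assumed to be an existing block whose old terminator has already been removed.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static void capBlock(BasicBlock *BB) {
    new UnreachableInst(BB->getContext(), BB);  // appended as BB's terminator
  }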
4713//===----------------------------------------------------------------------===//
4714// TruncInst Class
4715//===----------------------------------------------------------------------===//
4716
4717/// This class represents a truncation of integer types.
4718class TruncInst : public CastInst {
4719protected:
4720 // Note: Instruction needs to be a friend here to call cloneImpl.
4721 friend class Instruction;
4722
4723 /// Clone an identical TruncInst
4724 TruncInst *cloneImpl() const;
4725
4726public:
4727 /// Constructor with insert-before-instruction semantics
4728 TruncInst(
4729 Value *S, ///< The value to be truncated
4730 Type *Ty, ///< The (smaller) type to truncate to
4731 const Twine &NameStr = "", ///< A name for the new instruction
4732 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4733 );
4734
4735 /// Constructor with insert-at-end-of-block semantics
4736 TruncInst(
4737 Value *S, ///< The value to be truncated
4738 Type *Ty, ///< The (smaller) type to truncate to
4739 const Twine &NameStr, ///< A name for the new instruction
4740 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4741 );
4742
4743 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4744 static bool classof(const Instruction *I) {
4745 return I->getOpcode() == Trunc;
4746 }
4747 static bool classof(const Value *V) {
4748 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4749 }
4750};
4751
4752//===----------------------------------------------------------------------===//
4753// ZExtInst Class
4754//===----------------------------------------------------------------------===//
4755
4756/// This class represents zero extension of integer types.
4757class ZExtInst : public CastInst {
4758protected:
4759 // Note: Instruction needs to be a friend here to call cloneImpl.
4760 friend class Instruction;
4761
4762 /// Clone an identical ZExtInst
4763 ZExtInst *cloneImpl() const;
4764
4765public:
4766 /// Constructor with insert-before-instruction semantics
4767 ZExtInst(
4768 Value *S, ///< The value to be zero extended
4769 Type *Ty, ///< The type to zero extend to
4770 const Twine &NameStr = "", ///< A name for the new instruction
4771 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4772 );
4773
4774 /// Constructor with insert-at-end semantics.
4775 ZExtInst(
4776 Value *S, ///< The value to be zero extended
4777 Type *Ty, ///< The type to zero extend to
4778 const Twine &NameStr, ///< A name for the new instruction
4779 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4780 );
4781
4782 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4783 static bool classof(const Instruction *I) {
4784 return I->getOpcode() == ZExt;
4785 }
4786 static bool classof(const Value *V) {
4787 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4788 }
4789};
4790
4791//===----------------------------------------------------------------------===//
4792// SExtInst Class
4793//===----------------------------------------------------------------------===//
4794
4795/// This class represents a sign extension of integer types.
4796class SExtInst : public CastInst {
4797protected:
4798 // Note: Instruction needs to be a friend here to call cloneImpl.
4799 friend class Instruction;
4800
4801 /// Clone an identical SExtInst
4802 SExtInst *cloneImpl() const;
4803
4804public:
4805 /// Constructor with insert-before-instruction semantics
4806 SExtInst(
4807 Value *S, ///< The value to be sign extended
4808 Type *Ty, ///< The type to sign extend to
4809 const Twine &NameStr = "", ///< A name for the new instruction
4810 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4811 );
4812
4813 /// Constructor with insert-at-end-of-block semantics
4814 SExtInst(
4815 Value *S, ///< The value to be sign extended
4816 Type *Ty, ///< The type to sign extend to
4817 const Twine &NameStr, ///< A name for the new instruction
4818 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4819 );
4820
4821 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4822 static bool classof(const Instruction *I) {
4823 return I->getOpcode() == SExt;
4824 }
4825 static bool classof(const Value *V) {
4826 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4827 }
4828};
4829
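A usage sketch under stated assumptions: in practice these integer cast instructions are usually created through IRBuilder rather than by calling the constructors above directly; the builder may constant-fold, which is why the results are typed as Value rather than the concrete instruction classes. 'V8' is assumed to be an existing i8 value.

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  static Value *widenAndNarrow(IRBuilder<> &B, Value *V8) {
    Value *Wide   = B.CreateZExt(V8, B.getInt32Ty(), "wide");   // ZExtInst
    Value *Signed = B.CreateSExt(V8, B.getInt32Ty(), "sgn");    // SExtInst
    Value *Narrow = B.CreateTrunc(Wide, B.getInt8Ty(), "nrw");  // TruncInst
    return B.CreateAdd(Signed, B.CreateZExt(Narrow, B.getInt32Ty()));
  }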
4830//===----------------------------------------------------------------------===//
4831// FPTruncInst Class
4832//===----------------------------------------------------------------------===//
4833
4834/// This class represents a truncation of floating point types.
4835class FPTruncInst : public CastInst {
4836protected:
4837 // Note: Instruction needs to be a friend here to call cloneImpl.
4838 friend class Instruction;
4839
4840 /// Clone an identical FPTruncInst
4841 FPTruncInst *cloneImpl() const;
4842
4843public:
4844 /// Constructor with insert-before-instruction semantics
4845 FPTruncInst(
4846 Value *S, ///< The value to be truncated
4847 Type *Ty, ///< The type to truncate to
4848 const Twine &NameStr = "", ///< A name for the new instruction
4849 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4850 );
4851
4852 /// Constructor with insert-at-end-of-block semantics
4853 FPTruncInst(
4854 Value *S, ///< The value to be truncated
4855 Type *Ty, ///< The type to truncate to
4856 const Twine &NameStr, ///< A name for the new instruction
4857 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4858 );
4859
4860 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4861 static bool classof(const Instruction *I) {
4862 return I->getOpcode() == FPTrunc;
4863 }
4864 static bool classof(const Value *V) {
4865 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4866 }
4867};
4868
4869//===----------------------------------------------------------------------===//
4870// FPExtInst Class
4871//===----------------------------------------------------------------------===//
4872
4873/// This class represents an extension of floating point types.
4874class FPExtInst : public CastInst {
4875protected:
4876 // Note: Instruction needs to be a friend here to call cloneImpl.
4877 friend class Instruction;
4878
4879 /// Clone an identical FPExtInst
4880 FPExtInst *cloneImpl() const;
4881
4882public:
4883 /// Constructor with insert-before-instruction semantics
4884 FPExtInst(
4885 Value *S, ///< The value to be extended
4886 Type *Ty, ///< The type to extend to
4887 const Twine &NameStr = "", ///< A name for the new instruction
4888 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4889 );
4890
4891 /// Constructor with insert-at-end-of-block semantics
4892 FPExtInst(
4893 Value *S, ///< The value to be extended
4894 Type *Ty, ///< The type to extend to
4895 const Twine &NameStr, ///< A name for the new instruction
4896 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4897 );
4898
4899 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4900 static bool classof(const Instruction *I) {
4901 return I->getOpcode() == FPExt;
4902 }
4903 static bool classof(const Value *V) {
4904 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4905 }
4906};
4907
4908//===----------------------------------------------------------------------===//
4909// UIToFPInst Class
4910//===----------------------------------------------------------------------===//
4911
4912/// This class represents a cast unsigned integer to floating point.
4913class UIToFPInst : public CastInst {
4914protected:
4915 // Note: Instruction needs to be a friend here to call cloneImpl.
4916 friend class Instruction;
4917
4918 /// Clone an identical UIToFPInst
4919 UIToFPInst *cloneImpl() const;
4920
4921public:
4922 /// Constructor with insert-before-instruction semantics
4923 UIToFPInst(
4924 Value *S, ///< The value to be converted
4925 Type *Ty, ///< The type to convert to
4926 const Twine &NameStr = "", ///< A name for the new instruction
4927 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4928 );
4929
4930 /// Constructor with insert-at-end-of-block semantics
4931 UIToFPInst(
4932 Value *S, ///< The value to be converted
4933 Type *Ty, ///< The type to convert to
4934 const Twine &NameStr, ///< A name for the new instruction
4935 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4936 );
4937
4938 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4939 static bool classof(const Instruction *I) {
4940 return I->getOpcode() == UIToFP;
4941 }
4942 static bool classof(const Value *V) {
4943 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4944 }
4945};
4946
4947//===----------------------------------------------------------------------===//
4948// SIToFPInst Class
4949//===----------------------------------------------------------------------===//
4950
4951/// This class represents a cast from signed integer to floating point.
4952class SIToFPInst : public CastInst {
4953protected:
4954 // Note: Instruction needs to be a friend here to call cloneImpl.
4955 friend class Instruction;
4956
4957 /// Clone an identical SIToFPInst
4958 SIToFPInst *cloneImpl() const;
4959
4960public:
4961 /// Constructor with insert-before-instruction semantics
4962 SIToFPInst(
4963 Value *S, ///< The value to be converted
4964 Type *Ty, ///< The type to convert to
4965 const Twine &NameStr = "", ///< A name for the new instruction
4966 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4967 );
4968
4969 /// Constructor with insert-at-end-of-block semantics
4970 SIToFPInst(
4971 Value *S, ///< The value to be converted
4972 Type *Ty, ///< The type to convert to
4973 const Twine &NameStr, ///< A name for the new instruction
4974 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4975 );
4976
4977 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4978 static bool classof(const Instruction *I) {
4979 return I->getOpcode() == SIToFP;
4980 }
4981 static bool classof(const Value *V) {
4982 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4983 }
4984};
4985
4986//===----------------------------------------------------------------------===//
4987// FPToUIInst Class
4988//===----------------------------------------------------------------------===//
4989
4990/// This class represents a cast from floating point to unsigned integer.
4991class FPToUIInst : public CastInst {
4992protected:
4993 // Note: Instruction needs to be a friend here to call cloneImpl.
4994 friend class Instruction;
4995
4996 /// Clone an identical FPToUIInst
4997 FPToUIInst *cloneImpl() const;
4998
4999public:
5000 /// Constructor with insert-before-instruction semantics
5001 FPToUIInst(
5002 Value *S, ///< The value to be converted
5003 Type *Ty, ///< The type to convert to
5004 const Twine &NameStr = "", ///< A name for the new instruction
5005 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5006 );
5007
5008 /// Constructor with insert-at-end-of-block semantics
5009 FPToUIInst(
5010 Value *S, ///< The value to be converted
5011 Type *Ty, ///< The type to convert to
5012 const Twine &NameStr, ///< A name for the new instruction
5013 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5014 );
5015
5016 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5017 static bool classof(const Instruction *I) {
5018 return I->getOpcode() == FPToUI;
5019 }
5020 static bool classof(const Value *V) {
5021 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5022 }
5023};
5024
5025//===----------------------------------------------------------------------===//
5026// FPToSIInst Class
5027//===----------------------------------------------------------------------===//
5028
5029/// This class represents a cast from floating point to signed integer.
5030class FPToSIInst : public CastInst {
5031protected:
5032 // Note: Instruction needs to be a friend here to call cloneImpl.
5033 friend class Instruction;
5034
5035 /// Clone an identical FPToSIInst
5036 FPToSIInst *cloneImpl() const;
5037
5038public:
5039 /// Constructor with insert-before-instruction semantics
5040 FPToSIInst(
5041 Value *S, ///< The value to be converted
5042 Type *Ty, ///< The type to convert to
5043 const Twine &NameStr = "", ///< A name for the new instruction
5044 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5045 );
5046
5047 /// Constructor with insert-at-end-of-block semantics
5048 FPToSIInst(
5049 Value *S, ///< The value to be converted
5050 Type *Ty, ///< The type to convert to
5051 const Twine &NameStr, ///< A name for the new instruction
5052 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5053 );
5054
5055 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5056 static bool classof(const Instruction *I) {
5057 return I->getOpcode() == FPToSI;
5058 }
5059 static bool classof(const Value *V) {
5060 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5061 }
5062};
5063
5064//===----------------------------------------------------------------------===//
5065// IntToPtrInst Class
5066//===----------------------------------------------------------------------===//
5067
5068/// This class represents a cast from an integer to a pointer.
5069class IntToPtrInst : public CastInst {
5070public:
5071 // Note: Instruction needs to be a friend here to call cloneImpl.
5072 friend class Instruction;
5073
5074 /// Constructor with insert-before-instruction semantics
5075 IntToPtrInst(
5076 Value *S, ///< The value to be converted
5077 Type *Ty, ///< The type to convert to
5078 const Twine &NameStr = "", ///< A name for the new instruction
5079 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5080 );
5081
5082 /// Constructor with insert-at-end-of-block semantics
5083 IntToPtrInst(
5084 Value *S, ///< The value to be converted
5085 Type *Ty, ///< The type to convert to
5086 const Twine &NameStr, ///< A name for the new instruction
5087 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5088 );
5089
5090 /// Clone an identical IntToPtrInst.
5091 IntToPtrInst *cloneImpl() const;
5092
5093 /// Returns the address space of this instruction's pointer type.
5094 unsigned getAddressSpace() const {
5095 return getType()->getPointerAddressSpace();
5096 }
5097
5098 // Methods for support type inquiry through isa, cast, and dyn_cast:
5099 static bool classof(const Instruction *I) {
5100 return I->getOpcode() == IntToPtr;
5101 }
5102 static bool classof(const Value *V) {
5103 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5104 }
5105};
5106
5107//===----------------------------------------------------------------------===//
5108// PtrToIntInst Class
5109//===----------------------------------------------------------------------===//
5110
5111/// This class represents a cast from a pointer to an integer.
5112class PtrToIntInst : public CastInst {
5113protected:
5114 // Note: Instruction needs to be a friend here to call cloneImpl.
5115 friend class Instruction;
5116
5117 /// Clone an identical PtrToIntInst.
5118 PtrToIntInst *cloneImpl() const;
5119
5120public:
5121 /// Constructor with insert-before-instruction semantics
5122 PtrToIntInst(
5123 Value *S, ///< The value to be converted
5124 Type *Ty, ///< The type to convert to
5125 const Twine &NameStr = "", ///< A name for the new instruction
5126 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5127 );
5128
5129 /// Constructor with insert-at-end-of-block semantics
5130 PtrToIntInst(
5131 Value *S, ///< The value to be converted
5132 Type *Ty, ///< The type to convert to
5133 const Twine &NameStr, ///< A name for the new instruction
5134 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5135 );
5136
5137 /// Gets the pointer operand.
5138 Value *getPointerOperand() { return getOperand(0); }
5139 /// Gets the pointer operand.
5140 const Value *getPointerOperand() const { return getOperand(0); }
5141 /// Gets the operand index of the pointer operand.
5142 static unsigned getPointerOperandIndex() { return 0U; }
5143
5144 /// Returns the address space of the pointer operand.
5145 unsigned getPointerAddressSpace() const {
5146 return getPointerOperand()->getType()->getPointerAddressSpace();
5147 }
5148
5149 // Methods for support type inquiry through isa, cast, and dyn_cast:
5150 static bool classof(const Instruction *I) {
5151 return I->getOpcode() == PtrToInt;
5152 }
5153 static bool classof(const Value *V) {
5154 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5155 }
5156};
5157
5158//===----------------------------------------------------------------------===//
5159// BitCastInst Class
5160//===----------------------------------------------------------------------===//
5161
5162/// This class represents a no-op cast from one type to another.
5163class BitCastInst : public CastInst {
5164protected:
5165 // Note: Instruction needs to be a friend here to call cloneImpl.
5166 friend class Instruction;
5167
5168 /// Clone an identical BitCastInst.
5169 BitCastInst *cloneImpl() const;
5170
5171public:
5172 /// Constructor with insert-before-instruction semantics
5173 BitCastInst(
5174 Value *S, ///< The value to be casted
5175 Type *Ty, ///< The type to cast to
5176 const Twine &NameStr = "", ///< A name for the new instruction
5177 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5178 );
5179
5180 /// Constructor with insert-at-end-of-block semantics
5181 BitCastInst(
5182 Value *S, ///< The value to be casted
5183 Type *Ty, ///< The type to cast to
5184 const Twine &NameStr, ///< A name for the new instruction
5185 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5186 );
5187
5188 // Methods for support type inquiry through isa, cast, and dyn_cast:
5189 static bool classof(const Instruction *I) {
5190 return I->getOpcode() == BitCast;
5191 }
5192 static bool classof(const Value *V) {
5193 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5194 }
5195};
5196
5197//===----------------------------------------------------------------------===//
5198// AddrSpaceCastInst Class
5199//===----------------------------------------------------------------------===//
5200
5201/// This class represents a conversion between pointers from one address space
5202/// to another.
5203class AddrSpaceCastInst : public CastInst {
5204protected:
5205 // Note: Instruction needs to be a friend here to call cloneImpl.
5206 friend class Instruction;
5207
5208 /// Clone an identical AddrSpaceCastInst.
5209 AddrSpaceCastInst *cloneImpl() const;
5210
5211public:
5212 /// Constructor with insert-before-instruction semantics
5213 AddrSpaceCastInst(
5214 Value *S, ///< The value to be casted
5215 Type *Ty, ///< The type to cast to
5216 const Twine &NameStr = "", ///< A name for the new instruction
5217 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5218 );
5219
5220 /// Constructor with insert-at-end-of-block semantics
5221 AddrSpaceCastInst(
5222 Value *S, ///< The value to be casted
5223 Type *Ty, ///< The type to cast to
5224 const Twine &NameStr, ///< A name for the new instruction
5225 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5226 );
5227
5228 // Methods for support type inquiry through isa, cast, and dyn_cast:
5229 static bool classof(const Instruction *I) {
5230 return I->getOpcode() == AddrSpaceCast;
5231 }
5232 static bool classof(const Value *V) {
5233 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5234 }
5235
5236 /// Gets the pointer operand.
5237 Value *getPointerOperand() {
5238 return getOperand(0);
5239 }
5240
5241 /// Gets the pointer operand.
5242 const Value *getPointerOperand() const {
5243 return getOperand(0);
5244 }
5245
5246 /// Gets the operand index of the pointer operand.
5247 static unsigned getPointerOperandIndex() {
5248 return 0U;
5249 }
5250
5251 /// Returns the address space of the pointer operand.
5252 unsigned getSrcAddressSpace() const {
5253 return getPointerOperand()->getType()->getPointerAddressSpace();
5254 }
5255
5256 /// Returns the address space of the result.
5257 unsigned getDestAddressSpace() const {
5258 return getType()->getPointerAddressSpace();
5259 }
5260};
5261
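A small sketch of the address-space accessors just above; 'ASC' is assumed to be an existing addrspacecast instruction obtained elsewhere.

  #include "llvm/IR/Instructions.h"

  static bool castsIntoDefaultAddrSpace(const llvm::AddrSpaceCastInst *ASC) {
    return ASC->getSrcAddressSpace() != 0 && ASC->getDestAddressSpace() == 0;
  }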
5262/// A helper function that returns the pointer operand of a load or store
5263/// instruction. Returns nullptr if not load or store.
5264inline const Value *getLoadStorePointerOperand(const Value *V) {
5265 if (auto *Load = dyn_cast<LoadInst>(V))
5266 return Load->getPointerOperand();
5267 if (auto *Store = dyn_cast<StoreInst>(V))
5268 return Store->getPointerOperand();
5269 return nullptr;
5270}
5271inline Value *getLoadStorePointerOperand(Value *V) {
5272 return const_cast<Value *>(
5273 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5274}
5275
5276/// A helper function that returns the pointer operand of a load, store
5277/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5278inline const Value *getPointerOperand(const Value *V) {
5279 if (auto *Ptr = getLoadStorePointerOperand(V))
5280 return Ptr;
5281 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5282 return Gep->getPointerOperand();
5283 return nullptr;
5284}
5285inline Value *getPointerOperand(Value *V) {
5286 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5287}
5288
5289/// A helper function that returns the alignment of load or store instruction.
5290inline MaybeAlign getLoadStoreAlignment(Value *I) {
5291 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5292 "Expected Load or Store instruction");
5293 if (auto *LI = dyn_cast<LoadInst>(I))
5294 return MaybeAlign(LI->getAlignment());
5295 return MaybeAlign(cast<StoreInst>(I)->getAlignment());
5296}
5297
5298/// A helper function that returns the address space of the pointer operand of
5299/// load or store instruction.
5300inline unsigned getLoadStoreAddressSpace(Value *I) {
5301 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5302 "Expected Load or Store instruction");
5303 if (auto *LI = dyn_cast<LoadInst>(I))
5304 return LI->getPointerAddressSpace();
5305 return cast<StoreInst>(I)->getPointerAddressSpace();
5306}
5307
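A hedged sketch of how an analysis might use the free-function helpers above to treat loads, stores and GEPs uniformly; 'I' is any instruction. Note that the alignment and address-space helpers assert that they are given a load or a store, so they are only called on the first path.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static void describeMemoryAccess(Instruction *I) {
    if (Value *PtrLS = getLoadStorePointerOperand(I)) {
      // Only loads and stores reach here, so these queries are valid.
      MaybeAlign Align = getLoadStoreAlignment(I);
      unsigned AS = getLoadStoreAddressSpace(I);
      (void)PtrLS; (void)Align; (void)AS;
    } else if (Value *PtrGEP = getPointerOperand(I)) {
      // Reached for GEPs; loads and stores were handled above.
      (void)PtrGEP;
    }
  }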
5308//===----------------------------------------------------------------------===//
5309// FreezeInst Class
5310//===----------------------------------------------------------------------===//
5311
5312/// This class represents a freeze instruction, which returns an arbitrary
5313/// concrete value if its operand is either a poison value or an undef value
5314class FreezeInst : public UnaryInstruction {
5315protected:
5316 // Note: Instruction needs to be a friend here to call cloneImpl.
5317 friend class Instruction;
5318
5319 /// Clone an identical FreezeInst
5320 FreezeInst *cloneImpl() const;
5321
5322public:
5323 explicit FreezeInst(Value *S,
5324 const Twine &NameStr = "",
5325 Instruction *InsertBefore = nullptr);
5326 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5327
5328 // Methods for support type inquiry through isa, cast, and dyn_cast:
5329 static inline bool classof(const Instruction *I) {
5330 return I->getOpcode() == Freeze;
5331 }
5332 static inline bool classof(const Value *V) {
5333 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5334 }
5335};
5336
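A minimal sketch, assuming the usual IRBuilder entry point: freeze is normally created through the builder; the result is a fixed, arbitrary value when the operand is poison or undef, and is otherwise equal to the operand. 'MaybePoison' is an assumed, hypothetical input value.

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  static Value *freezeBeforeUse(IRBuilder<> &B, Value *MaybePoison) {
    return B.CreateFreeze(MaybePoison, "frozen");  // yields a FreezeInst
  }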
5337} // end namespace llvm
5338
5339#endif // LLVM_IR_INSTRUCTIONS_H