LLVM 18.0.0git
Instructions.h
Go to the documentation of this file.
1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
26#include "llvm/IR/CFG.h"
27#include "llvm/IR/Constant.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Instruction.h"
32#include "llvm/IR/Use.h"
33#include "llvm/IR/User.h"
36#include <cassert>
37#include <cstddef>
38#include <cstdint>
39#include <iterator>
40#include <optional>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52
53//===----------------------------------------------------------------------===//
54// AllocaInst Class
55//===----------------------------------------------------------------------===//
56
57/// an instruction to allocate memory on the stack
// NOTE(review): this listing appears to be a doxygen scrape with embedded
// original line numbers; the class declaration line (orig. 58, presumably
// `class AllocaInst : public UnaryInstruction {`) and several member
// signature lines are missing from this view -- confirm against the
// upstream llvm/IR/Instructions.h before editing.
// The element type being allocated. Stored explicitly as a member; the
// instruction's own result type is a pointer (see getType below).
59 Type *AllocatedType;
60
// Layout of the Instruction subclass-data bitfield. SwiftErrorField is
// referenced below but its using-declaration (orig. line 63) is not
// visible in this view.
61 using AlignmentField = AlignmentBitfieldElementT<0>;
62 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65 SwiftErrorField>(),
66 "Bitfields must be contiguous");
67
68protected:
69 // Note: Instruction needs to be a friend here to call cloneImpl.
70 friend class Instruction;
71
72 AllocaInst *cloneImpl() const;
73
74public:
// Constructors: Ty is the element type to allocate; ArraySize (when
// present) is the number of elements; insertion position is given either
// as an Instruction to insert before or a BasicBlock to append to.
75 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76 const Twine &Name, Instruction *InsertBefore);
77 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78 const Twine &Name, BasicBlock *InsertAtEnd);
79
80 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81 Instruction *InsertBefore);
82 AllocaInst(Type *Ty, unsigned AddrSpace,
83 const Twine &Name, BasicBlock *InsertAtEnd);
84
85 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86 const Twine &Name = "", Instruction *InsertBefore = nullptr);
87 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88 const Twine &Name, BasicBlock *InsertAtEnd);
89
90 /// Return true if there is an allocation size parameter to the allocation
91 /// instruction that is not 1.
92 bool isArrayAllocation() const;
93
94 /// Get the number of elements allocated. For a simple allocation of a single
95 /// element, this will return a constant 1 value.
96 const Value *getArraySize() const { return getOperand(0); }
97 Value *getArraySize() { return getOperand(0); }
98
99 /// Overload to return most specific pointer type.
// NOTE(review): the signature line (orig. 100, presumably
// `PointerType *getType() const {`) is missing from this view.
101 return cast<PointerType>(Instruction::getType());
102 }
103
104 /// Return the address space for the allocation.
105 unsigned getAddressSpace() const {
106 return getType()->getAddressSpace();
107 }
108
109 /// Get allocation size in bytes. Returns std::nullopt if size can't be
110 /// determined, e.g. in case of a VLA.
111 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
112
113 /// Get allocation size in bits. Returns std::nullopt if size can't be
114 /// determined, e.g. in case of a VLA.
115 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116
117 /// Return the type that is being allocated by the instruction.
118 Type *getAllocatedType() const { return AllocatedType; }
119 /// for use only in special circumstances that need to generically
120 /// transform a whole instruction (eg: IR linking and vectorization).
121 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122
123 /// Return the alignment of the memory that is being allocated by the
124 /// instruction.
// The bitfield stores log2 of the alignment; reconstruct with a shift.
125 Align getAlign() const {
126 return Align(1ULL << getSubclassData<AlignmentField>());
127 }
128
// NOTE(review): the signature line (orig. 129, presumably
// `void setAlignment(Align Align) {`) is missing; the body stores the
// log2 of the requested alignment, matching getAlign above.
130 setSubclassData<AlignmentField>(Log2(Align));
131 }
132
133 /// Return true if this alloca is in the entry block of the function and is a
134 /// constant size. If so, the code generator will fold it into the
135 /// prolog/epilog code, so it is basically free.
136 bool isStaticAlloca() const;
137
138 /// Return true if this alloca is used as an inalloca argument to a call. Such
139 /// allocas are never considered static even if they are in the entry block.
140 bool isUsedWithInAlloca() const {
141 return getSubclassData<UsedWithInAllocaField>();
142 }
143
144 /// Specify whether this alloca is used to represent the arguments to a call.
145 void setUsedWithInAlloca(bool V) {
146 setSubclassData<UsedWithInAllocaField>(V);
147 }
148
149 /// Return true if this alloca is used as a swifterror argument to a call.
150 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
151 /// Specify whether this alloca is used to represent a swifterror.
152 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
153
154 // Methods for support type inquiry through isa, cast, and dyn_cast:
155 static bool classof(const Instruction *I) {
156 return (I->getOpcode() == Instruction::Alloca);
157 }
158 static bool classof(const Value *V) {
159 return isa<Instruction>(V) && classof(cast<Instruction>(V));
160 }
161
162private:
163 // Shadow Instruction::setInstructionSubclassData with a private forwarding
164 // method so that subclasses cannot accidentally use it.
165 template <typename Bitfield>
166 void setSubclassData(typename Bitfield::Type Value) {
167 Instruction::setSubclassData<Bitfield>(Value);
168 }
169};
170
171//===----------------------------------------------------------------------===//
172// LoadInst Class
173//===----------------------------------------------------------------------===//
174
175/// An instruction for reading from memory. This uses the SubclassData field in
176/// Value to store whether or not the load is volatile.
// NOTE(review): scraped listing -- the class declaration line (orig. 177,
// presumably `class LoadInst : public UnaryInstruction {`) and the
// AlignmentField/OrderingField using-declarations (orig. 179-180) are
// missing from this view, as are several accessor signature lines noted
// below; confirm against upstream llvm/IR/Instructions.h.
178 using VolatileField = BoolBitfieldElementT<0>;
181 static_assert(
182 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
183 "Bitfields must be contiguous");
184
// Sanity-checks the operands/type; declared here, defined out of line.
185 void AssertOK();
186
187protected:
188 // Note: Instruction needs to be a friend here to call cloneImpl.
189 friend class Instruction;
190
191 LoadInst *cloneImpl() const;
192
193public:
// Constructors: Ty is the loaded type; Ptr the address operand. The last
// two overloads (orig. 205-211) also took atomic ordering/sync-scope
// parameters whose lines (orig. 206-207 and 210) are missing here.
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 Instruction *InsertBefore);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 BasicBlock *InsertAtEnd);
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, Instruction *InsertBefore = nullptr);
203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204 Align Align, BasicBlock *InsertAtEnd);
205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208 Instruction *InsertBefore = nullptr);
209 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
211 BasicBlock *InsertAtEnd);
212
213 /// Return true if this is a load from a volatile memory location.
214 bool isVolatile() const { return getSubclassData<VolatileField>(); }
215
216 /// Specify whether this is a volatile load or not.
217 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
218
219 /// Return the alignment of the access that is being performed.
// Bitfield holds log2 of the alignment.
220 Align getAlign() const {
221 return Align(1ULL << (getSubclassData<AlignmentField>()));
222 }
223
// NOTE(review): setAlignment signature line (orig. 224) missing here.
225 setSubclassData<AlignmentField>(Log2(Align));
226 }
227
228 /// Returns the ordering constraint of this load instruction.
// NOTE(review): getOrdering signature line (orig. 229) missing here.
230 return getSubclassData<OrderingField>();
231 }
232 /// Sets the ordering constraint of this load instruction. May not be Release
233 /// or AcquireRelease.
// NOTE(review): setOrdering signature line (orig. 234) missing here.
235 setSubclassData<OrderingField>(Ordering);
236 }
237
238 /// Returns the synchronization scope ID of this load instruction.
240 return SSID;
241 }
242
243 /// Sets the synchronization scope ID of this load instruction.
245 this->SSID = SSID;
246 }
247
248 /// Sets the ordering constraint and the synchronization scope ID of this load
249 /// instruction.
252 setOrdering(Ordering);
253 setSyncScopeID(SSID);
254 }
255
// "Simple" = neither atomic nor volatile: the common optimizable case.
256 bool isSimple() const { return !isAtomic() && !isVolatile(); }
257
258 bool isUnordered() const {
// NOTE(review): lines 259-260 of the condition (the ordering comparison)
// are missing from this view.
261 !isVolatile();
262 }
263
// NOTE(review): the non-const getPointerOperand overload (orig. 264) is
// missing from this view.
265 const Value *getPointerOperand() const { return getOperand(0); }
266 static unsigned getPointerOperandIndex() { return 0U; }
268
269 /// Returns the address space of the pointer operand.
270 unsigned getPointerAddressSpace() const {
// NOTE(review): body line (orig. 271) missing here.
272 }
273
274 // Methods for support type inquiry through isa, cast, and dyn_cast:
275 static bool classof(const Instruction *I) {
276 return I->getOpcode() == Instruction::Load;
277 }
278 static bool classof(const Value *V) {
279 return isa<Instruction>(V) && classof(cast<Instruction>(V));
280 }
281
282private:
283 // Shadow Instruction::setInstructionSubclassData with a private forwarding
284 // method so that subclasses cannot accidentally use it.
285 template <typename Bitfield>
286 void setSubclassData(typename Bitfield::Type Value) {
287 Instruction::setSubclassData<Bitfield>(Value);
288 }
289
290 /// The synchronization scope ID of this load instruction. Not quite enough
291 /// room in SubClassData for everything, so synchronization scope ID gets its
292 /// own field.
293 SyncScope::ID SSID;
294};
295
296//===----------------------------------------------------------------------===//
297// StoreInst Class
298//===----------------------------------------------------------------------===//
299
300/// An instruction for storing to memory.
// NOTE(review): scraped listing -- the bitfield using-declarations for
// AlignmentField/OrderingField (orig. 303-304) and the atomic-constructor
// signature lines (orig. 322, 324, 326-327, 329) are missing from this
// view; confirm against upstream llvm/IR/Instructions.h.
class StoreInst : public Instruction {
302 using VolatileField = BoolBitfieldElementT<0>;
305 static_assert(
306 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
307 "Bitfields must be contiguous");
308
// Sanity-checks the operands/type; declared here, defined out of line.
309 void AssertOK();
310
311protected:
312 // Note: Instruction needs to be a friend here to call cloneImpl.
313 friend class Instruction;
314
315 StoreInst *cloneImpl() const;
316
317public:
// Constructors: Val is the value stored, Ptr the address. Later overloads
// (lines partially missing here) also take alignment and atomic
// ordering/sync-scope parameters.
318 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
319 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
320 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
323 Instruction *InsertBefore = nullptr);
325 BasicBlock *InsertAtEnd);
328 Instruction *InsertBefore = nullptr);
330 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
331
332 // allocate space for exactly two operands
333 void *operator new(size_t S) { return User::operator new(S, 2); }
334 void operator delete(void *Ptr) { User::operator delete(Ptr); }
335
336 /// Return true if this is a store to a volatile memory location.
337 bool isVolatile() const { return getSubclassData<VolatileField>(); }
338
339 /// Specify whether this is a volatile store or not.
340 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
341
342 /// Transparently provide more efficient getOperand methods.
// NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS macro line
// (orig. 343) is missing from this view.
344
// Bitfield holds log2 of the alignment.
345 Align getAlign() const {
346 return Align(1ULL << (getSubclassData<AlignmentField>()));
347 }
348
// NOTE(review): setAlignment signature line (orig. 349) missing here.
350 setSubclassData<AlignmentField>(Log2(Align));
351 }
352
353 /// Returns the ordering constraint of this store instruction.
// NOTE(review): getOrdering signature line (orig. 354) missing here.
355 return getSubclassData<OrderingField>();
356 }
357
358 /// Sets the ordering constraint of this store instruction. May not be
359 /// Acquire or AcquireRelease.
// NOTE(review): setOrdering signature line (orig. 360) missing here.
361 setSubclassData<OrderingField>(Ordering);
362 }
363
364 /// Returns the synchronization scope ID of this store instruction.
366 return SSID;
367 }
368
369 /// Sets the synchronization scope ID of this store instruction.
371 this->SSID = SSID;
372 }
373
374 /// Sets the ordering constraint and the synchronization scope ID of this
375 /// store instruction.
378 setOrdering(Ordering);
379 setSyncScopeID(SSID);
380 }
381
// "Simple" = neither atomic nor volatile: the common optimizable case.
382 bool isSimple() const { return !isAtomic() && !isVolatile(); }
383
384 bool isUnordered() const {
// NOTE(review): lines 385-386 of the condition (the ordering comparison)
// are missing from this view.
387 !isVolatile();
388 }
389
// Operand 0 is the stored value; operand 1 is the pointer.
// NOTE(review): the non-const overloads (orig. 390, 393) are missing here.
391 const Value *getValueOperand() const { return getOperand(0); }
392
394 const Value *getPointerOperand() const { return getOperand(1); }
395 static unsigned getPointerOperandIndex() { return 1U; }
397
398 /// Returns the address space of the pointer operand.
399 unsigned getPointerAddressSpace() const {
// NOTE(review): body line (orig. 400) missing here.
401 }
402
403 // Methods for support type inquiry through isa, cast, and dyn_cast:
404 static bool classof(const Instruction *I) {
405 return I->getOpcode() == Instruction::Store;
406 }
407 static bool classof(const Value *V) {
408 return isa<Instruction>(V) && classof(cast<Instruction>(V));
409 }
410
411private:
412 // Shadow Instruction::setInstructionSubclassData with a private forwarding
413 // method so that subclasses cannot accidentally use it.
414 template <typename Bitfield>
415 void setSubclassData(typename Bitfield::Type Value) {
416 Instruction::setSubclassData<Bitfield>(Value);
417 }
418
419 /// The synchronization scope ID of this store instruction. Not quite enough
420 /// room in SubClassData for everything, so synchronization scope ID gets its
421 /// own field.
422 SyncScope::ID SSID;
423};
424
// StoreInst has exactly two operands (value, pointer); this specialization
// lets the transparent operand-accessor machinery know the fixed count.
425template <>
426struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
427};
428
430
431//===----------------------------------------------------------------------===//
432// FenceInst Class
433//===----------------------------------------------------------------------===//
434
435/// An instruction for ordering other memory operations.
// NOTE(review): scraped listing -- the constructor signature lines
// (orig. 450-451, 453) and the getOrdering/setOrdering/SyncScope accessor
// signature lines (orig. 461, 467, 472, 477) are missing from this view;
// confirm against upstream llvm/IR/Instructions.h.
class FenceInst : public Instruction {
437 using OrderingField = AtomicOrderingBitfieldElementT<0>;
438
439 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
440
441protected:
442 // Note: Instruction needs to be a friend here to call cloneImpl.
443 friend class Instruction;
444
445 FenceInst *cloneImpl() const;
446
447public:
448 // Ordering may only be Acquire, Release, AcquireRelease, or
449 // SequentiallyConsistent.
452 Instruction *InsertBefore = nullptr);
454 BasicBlock *InsertAtEnd);
455
456 // allocate space for exactly zero operands
457 void *operator new(size_t S) { return User::operator new(S, 0); }
458 void operator delete(void *Ptr) { User::operator delete(Ptr); }
459
460 /// Returns the ordering constraint of this fence instruction.
462 return getSubclassData<OrderingField>();
463 }
464
465 /// Sets the ordering constraint of this fence instruction. May only be
466 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
468 setSubclassData<OrderingField>(Ordering);
469 }
470
471 /// Returns the synchronization scope ID of this fence instruction.
473 return SSID;
474 }
475
476 /// Sets the synchronization scope ID of this fence instruction.
478 this->SSID = SSID;
479 }
480
481 // Methods for support type inquiry through isa, cast, and dyn_cast:
482 static bool classof(const Instruction *I) {
483 return I->getOpcode() == Instruction::Fence;
484 }
485 static bool classof(const Value *V) {
486 return isa<Instruction>(V) && classof(cast<Instruction>(V));
487 }
488
489private:
490 // Shadow Instruction::setInstructionSubclassData with a private forwarding
491 // method so that subclasses cannot accidentally use it.
492 template <typename Bitfield>
493 void setSubclassData(typename Bitfield::Type Value) {
494 Instruction::setSubclassData<Bitfield>(Value);
495 }
496
497 /// The synchronization scope ID of this fence instruction. Not quite enough
498 /// room in SubClassData for everything, so synchronization scope ID gets its
499 /// own field.
500 SyncScope::ID SSID;
501};
502
503//===----------------------------------------------------------------------===//
504// AtomicCmpXchgInst Class
505//===----------------------------------------------------------------------===//
506
507/// An instruction that atomically checks whether a
508/// specified value is in a memory location, and, if it is, stores a new value
509/// there. The value returned by this instruction is a pair containing the
510/// original value as first element, and an i1 indicating success (true) or
511/// failure (false) as second element.
512///
// NOTE(review): scraped listing -- the class declaration line (orig. 513),
// the bitfield using-declarations (orig. 520-521, 543-550), the cloneImpl
// declaration (orig. 527), and several accessor signature lines are
// missing from this view; confirm against upstream llvm/IR/Instructions.h.
514 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
515 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
516 SyncScope::ID SSID);
517
518 template <unsigned Offset>
519 using AtomicOrderingBitfieldElement =
// NOTE(review): the alias target (orig. 520-521) is missing here.
522
523protected:
524 // Note: Instruction needs to be a friend here to call cloneImpl.
525 friend class Instruction;
526
// NOTE(review): the cloneImpl declaration (orig. 527) is missing here.
528
529public:
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
533 Instruction *InsertBefore = nullptr);
534 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
535 AtomicOrdering SuccessOrdering,
536 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
537 BasicBlock *InsertAtEnd);
538
539 // allocate space for exactly three operands
540 void *operator new(size_t S) { return User::operator new(S, 3); }
541 void operator delete(void *Ptr) { User::operator delete(Ptr); }
542
// NOTE(review): the using-declarations for VolatileField, WeakField,
// SuccessOrderingField, FailureOrderingField and AlignmentField
// (orig. 543-550) are missing here; the static_assert below references
// them.
551 static_assert(
554 "Bitfields must be contiguous");
555
556 /// Return the alignment of the memory that is being allocated by the
557 /// instruction.
// Bitfield holds log2 of the alignment.
558 Align getAlign() const {
559 return Align(1ULL << getSubclassData<AlignmentField>());
560 }
561
// NOTE(review): setAlignment signature line (orig. 562) missing here.
563 setSubclassData<AlignmentField>(Log2(Align));
564 }
565
566 /// Return true if this is a cmpxchg from a volatile memory
567 /// location.
568 ///
569 bool isVolatile() const { return getSubclassData<VolatileField>(); }
570
571 /// Specify whether this is a volatile cmpxchg.
572 ///
573 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
574
575 /// Return true if this cmpxchg may spuriously fail.
576 bool isWeak() const { return getSubclassData<WeakField>(); }
577
578 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
579
580 /// Transparently provide more efficient getOperand methods.
// NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS macro line
// (orig. 581) is missing here.
582
// Validity predicates for the two orderings. Success may be anything
// atomic; failure additionally excludes the release-flavored orderings.
// NOTE(review): the isValidSuccessOrdering / isValidFailureOrdering
// signature lines (orig. 583, 588) are missing here.
584 return Ordering != AtomicOrdering::NotAtomic &&
585 Ordering != AtomicOrdering::Unordered;
586 }
587
589 return Ordering != AtomicOrdering::NotAtomic &&
590 Ordering != AtomicOrdering::Unordered &&
591 Ordering != AtomicOrdering::AcquireRelease &&
592 Ordering != AtomicOrdering::Release;
593 }
594
595 /// Returns the success ordering constraint of this cmpxchg instruction.
597 return getSubclassData<SuccessOrderingField>();
598 }
599
600 /// Sets the success ordering constraint of this cmpxchg instruction.
602 assert(isValidSuccessOrdering(Ordering) &&
603 "invalid CmpXchg success ordering");
604 setSubclassData<SuccessOrderingField>(Ordering);
605 }
606
607 /// Returns the failure ordering constraint of this cmpxchg instruction.
609 return getSubclassData<FailureOrderingField>();
610 }
611
612 /// Sets the failure ordering constraint of this cmpxchg instruction.
614 assert(isValidFailureOrdering(Ordering) &&
615 "invalid CmpXchg failure ordering");
616 setSubclassData<FailureOrderingField>(Ordering);
617 }
618
619 /// Returns a single ordering which is at least as strong as both the
620 /// success and failure orderings for this cmpxchg.
// NOTE(review): the getMergedOrdering signature and the comparison logic
// (orig. 621-628) are missing here; only the fall-through return remains.
629 }
630 return getSuccessOrdering();
631 }
632
633 /// Returns the synchronization scope ID of this cmpxchg instruction.
635 return SSID;
636 }
637
638 /// Sets the synchronization scope ID of this cmpxchg instruction.
640 this->SSID = SSID;
641 }
642
// Operands: 0 = pointer, 1 = expected/compare value, 2 = new value.
// NOTE(review): the non-const accessor overloads (orig. 643, 647, 650)
// are missing here.
644 const Value *getPointerOperand() const { return getOperand(0); }
645 static unsigned getPointerOperandIndex() { return 0U; }
646
648 const Value *getCompareOperand() const { return getOperand(1); }
649
651 const Value *getNewValOperand() const { return getOperand(2); }
652
653 /// Returns the address space of the pointer operand.
654 unsigned getPointerAddressSpace() const {
// NOTE(review): body line (orig. 655) missing here.
656 }
657
658 /// Returns the strongest permitted ordering on failure, given the
659 /// desired ordering on success.
660 ///
661 /// If the comparison in a cmpxchg operation fails, there is no atomic store
662 /// so release semantics cannot be provided. So this function drops explicit
663 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
664 /// operation would remain SequentiallyConsistent.
665 static AtomicOrdering
// NOTE(review): the rest of the signature (orig. 666) and the switch's
// case labels/returns (orig. 670-677) are missing from this view.
667 switch (SuccessOrdering) {
668 default:
669 llvm_unreachable("invalid cmpxchg success ordering");
678 }
679 }
680
681 // Methods for support type inquiry through isa, cast, and dyn_cast:
682 static bool classof(const Instruction *I) {
683 return I->getOpcode() == Instruction::AtomicCmpXchg;
684 }
685 static bool classof(const Value *V) {
686 return isa<Instruction>(V) && classof(cast<Instruction>(V));
687 }
688
689private:
690 // Shadow Instruction::setInstructionSubclassData with a private forwarding
691 // method so that subclasses cannot accidentally use it.
692 template <typename Bitfield>
693 void setSubclassData(typename Bitfield::Type Value) {
694 Instruction::setSubclassData<Bitfield>(Value);
695 }
696
697 /// The synchronization scope ID of this cmpxchg instruction. Not quite
698 /// enough room in SubClassData for everything, so synchronization scope ID
699 /// gets its own field.
700 SyncScope::ID SSID;
701};
702
// AtomicCmpXchgInst has exactly three operands (ptr, cmp, newval).
// NOTE(review): the `struct OperandTraits<AtomicCmpXchgInst> :` line
// (orig. 704) is missing from this scraped view.
703template <>
705 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
706};
707
709
710//===----------------------------------------------------------------------===//
711// AtomicRMWInst Class
712//===----------------------------------------------------------------------===//
713
714/// an instruction that atomically reads a memory location,
715/// combines it with another value, and then stores the result back. Returns
716/// the old value.
717///
// NOTE(review): scraped listing -- the class declaration line (orig. 718)
// and, notably, the BinOp enumerator name lines (orig. 732, 734, 736,
// 738, 740, 742, 744, 746, 748, 750, 752, 755, 758, 762, 766, 770, 774:
// Xchg, Add, Sub, And, Nand, Or, Xor, Max, Min, UMax, UMin, FAdd, FSub,
// FMax, FMin, UIncWrap, UDecWrap per the surviving doc comments and the
// FIRST_BINOP/LAST_BINOP lines) are missing from this view; confirm
// against upstream llvm/IR/Instructions.h.
719protected:
720 // Note: Instruction needs to be a friend here to call cloneImpl.
721 friend class Instruction;
722
723 AtomicRMWInst *cloneImpl() const;
724
725public:
726 /// This enumeration lists the possible modifications atomicrmw can make. In
727 /// the descriptions, 'p' is the pointer to the instruction's memory location,
728 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
729 /// instruction. These instructions always return 'old'.
730 enum BinOp : unsigned {
731 /// *p = v
733 /// *p = old + v
735 /// *p = old - v
737 /// *p = old & v
739 /// *p = ~(old & v)
741 /// *p = old | v
743 /// *p = old ^ v
745 /// *p = old >signed v ? old : v
747 /// *p = old <signed v ? old : v
749 /// *p = old >unsigned v ? old : v
751 /// *p = old <unsigned v ? old : v
753
754 /// *p = old + v
756
757 /// *p = old - v
759
760 /// *p = maxnum(old, v)
761 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
763
764 /// *p = minnum(old, v)
765 /// \p minnum matches the behavior of \p llvm.minnum.*.
767
768 /// Increment one up to a maximum value.
769 /// *p = (old u>= v) ? 0 : (old + 1)
771
772 /// Decrement one until a minimum value or zero.
773 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
775
776 FIRST_BINOP = Xchg,
777 LAST_BINOP = UDecWrap,
778 BAD_BINOP
779 };
780
781private:
782 template <unsigned Offset>
783 using AtomicOrderingBitfieldElement =
// NOTE(review): alias targets (orig. 784-785, 789) are missing here.
786
787 template <unsigned Offset>
788 using BinOpBitfieldElement =
790
791public:
792 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793 AtomicOrdering Ordering, SyncScope::ID SSID,
794 Instruction *InsertBefore = nullptr);
795 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
796 AtomicOrdering Ordering, SyncScope::ID SSID,
797 BasicBlock *InsertAtEnd);
798
799 // allocate space for exactly two operands
800 void *operator new(size_t S) { return User::operator new(S, 2); }
801 void operator delete(void *Ptr) { User::operator delete(Ptr); }
802
// NOTE(review): the VolatileField/AtomicOrderingField/AlignmentField
// using-declarations and the static_assert head (orig. 803-805, 807-809)
// are missing here.
806 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
810 "Bitfields must be contiguous");
811
812 BinOp getOperation() const { return getSubclassData<OperationField>(); }
813
814 static StringRef getOperationName(BinOp Op);
815
// True for the floating-point flavors of the operation.
816 static bool isFPOperation(BinOp Op) {
817 switch (Op) {
// NOTE(review): the FP case labels (orig. 818-821) are missing here.
822 return true;
823 default:
824 return false;
825 }
826 }
827
// NOTE(review): setOperation signature line (orig. 828) missing here.
829 setSubclassData<OperationField>(Operation);
830 }
831
832 /// Return the alignment of the memory that is being allocated by the
833 /// instruction.
// Bitfield holds log2 of the alignment.
834 Align getAlign() const {
835 return Align(1ULL << getSubclassData<AlignmentField>());
836 }
837
// NOTE(review): setAlignment signature line (orig. 838) missing here.
839 setSubclassData<AlignmentField>(Log2(Align));
840 }
841
842 /// Return true if this is a RMW on a volatile memory location.
843 ///
844 bool isVolatile() const { return getSubclassData<VolatileField>(); }
845
846 /// Specify whether this is a volatile RMW or not.
847 ///
848 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
849
850 /// Transparently provide more efficient getOperand methods.
// NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS macro line
// (orig. 851) is missing here.
852
853 /// Returns the ordering constraint of this rmw instruction.
// NOTE(review): getOrdering signature line (orig. 854) missing here.
855 return getSubclassData<AtomicOrderingField>();
856 }
857
858 /// Sets the ordering constraint of this rmw instruction.
// NOTE(review): setOrdering signature line (orig. 859) missing here.
860 assert(Ordering != AtomicOrdering::NotAtomic &&
861 "atomicrmw instructions can only be atomic.");
862 assert(Ordering != AtomicOrdering::Unordered &&
863 "atomicrmw instructions cannot be unordered.");
864 setSubclassData<AtomicOrderingField>(Ordering);
865 }
866
867 /// Returns the synchronization scope ID of this rmw instruction.
869 return SSID;
870 }
871
872 /// Sets the synchronization scope ID of this rmw instruction.
874 this->SSID = SSID;
875 }
876
// Operands: 0 = pointer, 1 = value to combine with *p.
877 Value *getPointerOperand() { return getOperand(0); }
878 const Value *getPointerOperand() const { return getOperand(0); }
879 static unsigned getPointerOperandIndex() { return 0U; }
880
881 Value *getValOperand() { return getOperand(1); }
882 const Value *getValOperand() const { return getOperand(1); }
883
884 /// Returns the address space of the pointer operand.
885 unsigned getPointerAddressSpace() const {
// NOTE(review): body line (orig. 886) missing here.
887 }
888
// NOTE(review): isFloatingPointOperation signature line (orig. 889)
// missing here.
890 return isFPOperation(getOperation());
891 }
892
893 // Methods for support type inquiry through isa, cast, and dyn_cast:
894 static bool classof(const Instruction *I) {
895 return I->getOpcode() == Instruction::AtomicRMW;
896 }
897 static bool classof(const Value *V) {
898 return isa<Instruction>(V) && classof(cast<Instruction>(V));
899 }
900
901private:
902 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
903 AtomicOrdering Ordering, SyncScope::ID SSID);
904
905 // Shadow Instruction::setInstructionSubclassData with a private forwarding
906 // method so that subclasses cannot accidentally use it.
907 template <typename Bitfield>
908 void setSubclassData(typename Bitfield::Type Value) {
909 Instruction::setSubclassData<Bitfield>(Value);
910 }
911
912 /// The synchronization scope ID of this rmw instruction. Not quite enough
913 /// room in SubClassData for everything, so synchronization scope ID gets its
914 /// own field.
915 SyncScope::ID SSID;
916};
917
// AtomicRMWInst has exactly two operands (ptr, val).
918template <>
920 : public FixedNumOperandTraits<AtomicRMWInst,2> {
921};
922
924
925//===----------------------------------------------------------------------===//
926// GetElementPtrInst Class
927//===----------------------------------------------------------------------===//
928
929// checkGEPType - Simple wrapper function to give a better assertion failure
930// message on bad indexes for a gep instruction.
931//
// NOTE(review): the function signature line (orig. 932, presumably
// `inline Type *checkGEPType(Type *Ty) {`) is missing from this scraped
// view. The body asserts the indexed type resolved to non-null and
// passes it through unchanged.
933 assert(Ty && "Invalid GetElementPtrInst indices for type!");
934 return Ty;
935}
936
937/// an instruction for type-safe pointer arithmetic to
938/// access elements of arrays and structs
939///
941 Type *SourceElementType;
942 Type *ResultElementType;
943
945
946 /// Constructors - Create a getelementptr instruction with a base pointer an
947 /// list of indices. The first ctor can optionally insert before an existing
948 /// instruction, the second appends the new instruction to the specified
949 /// BasicBlock.
950 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
951 ArrayRef<Value *> IdxList, unsigned Values,
952 const Twine &NameStr, Instruction *InsertBefore);
953 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
954 ArrayRef<Value *> IdxList, unsigned Values,
955 const Twine &NameStr, BasicBlock *InsertAtEnd);
956
957 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
958
959protected:
960 // Note: Instruction needs to be a friend here to call cloneImpl.
961 friend class Instruction;
962
964
965public:
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr = "",
969 Instruction *InsertBefore = nullptr) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type");
972 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
973 NameStr, InsertBefore);
974 }
975
976 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
977 ArrayRef<Value *> IdxList,
978 const Twine &NameStr,
979 BasicBlock *InsertAtEnd) {
980 unsigned Values = 1 + unsigned(IdxList.size());
981 assert(PointeeType && "Must specify element type");
982 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
983 NameStr, InsertAtEnd);
984 }
985
986 /// Create an "inbounds" getelementptr. See the documentation for the
987 /// "inbounds" flag in LangRef.html for details.
988 static GetElementPtrInst *
990 const Twine &NameStr = "",
991 Instruction *InsertBefore = nullptr) {
993 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
994 GEP->setIsInBounds(true);
995 return GEP;
996 }
997
999 ArrayRef<Value *> IdxList,
1000 const Twine &NameStr,
1001 BasicBlock *InsertAtEnd) {
1003 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1004 GEP->setIsInBounds(true);
1005 return GEP;
1006 }
1007
1008 /// Transparently provide more efficient getOperand methods.
1010
1011 Type *getSourceElementType() const { return SourceElementType; }
1012
1013 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1014 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1015
1017 return ResultElementType;
1018 }
1019
1020 /// Returns the address space of this instruction's pointer type.
1021 unsigned getAddressSpace() const {
1022 // Note that this is always the same as the pointer operand's address space
1023 // and that is cheaper to compute, so cheat here.
1024 return getPointerAddressSpace();
1025 }
1026
1027 /// Returns the result type of a getelementptr with the given source
1028 /// element type and indexes.
1029 ///
1030 /// Null is returned if the indices are invalid for the specified
1031 /// source element type.
1032 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1033 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1034 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1035
1036 /// Return the type of the element at the given index of an indexable
1037 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1038 ///
1039 /// Returns null if the type can't be indexed, or the given index is not
1040 /// legal for the given type.
1041 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1042 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1043
1044 inline op_iterator idx_begin() { return op_begin()+1; }
1045 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1046 inline op_iterator idx_end() { return op_end(); }
1047 inline const_op_iterator idx_end() const { return op_end(); }
1048
1050 return make_range(idx_begin(), idx_end());
1051 }
1052
1054 return make_range(idx_begin(), idx_end());
1055 }
1056
1058 return getOperand(0);
1059 }
1060 const Value *getPointerOperand() const {
1061 return getOperand(0);
1062 }
1063 static unsigned getPointerOperandIndex() {
1064 return 0U; // get index for modifying correct operand.
1065 }
1066
1067 /// Method to return the pointer operand as a
1068 /// PointerType.
1070 return getPointerOperand()->getType();
1071 }
1072
1073 /// Returns the address space of the pointer operand.
1074 unsigned getPointerAddressSpace() const {
1076 }
1077
1078 /// Returns the pointer type returned by the GEP
1079 /// instruction, which may be a vector of pointers.
1081 // Vector GEP
1082 Type *Ty = Ptr->getType();
1083 if (Ty->isVectorTy())
1084 return Ty;
1085
1086 for (Value *Index : IdxList)
1087 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1088 ElementCount EltCount = IndexVTy->getElementCount();
1089 return VectorType::get(Ty, EltCount);
1090 }
1091 // Scalar GEP
1092 return Ty;
1093 }
1094
  /// Return the number of index operands: every operand after the base
  /// pointer (which is operand 0) is an index.
  unsigned getNumIndices() const { // Note: always non-negative
    return getNumOperands() - 1;
  }
1098
  /// Return true if this GEP has at least one index operand (i.e. more
  /// operands than just the base pointer).
  bool hasIndices() const {
    return getNumOperands() > 1;
  }
1102
1103 /// Return true if all of the indices of this GEP are
1104 /// zeros. If so, the result pointer and the first operand have the same
1105 /// value, just potentially different types.
1106 bool hasAllZeroIndices() const;
1107
1108 /// Return true if all of the indices of this GEP are
1109 /// constant integers. If so, the result pointer and the first operand have
1110 /// a constant offset between them.
1111 bool hasAllConstantIndices() const;
1112
1113 /// Set or clear the inbounds flag on this GEP instruction.
1114 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1115 void setIsInBounds(bool b = true);
1116
1117 /// Determine whether the GEP has the inbounds flag.
1118 bool isInBounds() const;
1119
1120 /// Accumulate the constant address offset of this GEP if possible.
1121 ///
1122 /// This routine accepts an APInt into which it will accumulate the constant
1123 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1124 /// all-constant, it returns false and the value of the offset APInt is
1125 /// undefined (it is *not* preserved!). The APInt passed into this routine
1126 /// must be at least as wide as the IntPtr type for the address space of
1127 /// the base GEP pointer.
1128 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1129 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1130 MapVector<Value *, APInt> &VariableOffsets,
1131 APInt &ConstantOffset) const;
1132 // Methods for support type inquiry through isa, cast, and dyn_cast:
1133 static bool classof(const Instruction *I) {
1134 return (I->getOpcode() == Instruction::GetElementPtr);
1135 }
1136 static bool classof(const Value *V) {
1137 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1138 }
1139};
1140
1141template <>
1143 public VariadicOperandTraits<GetElementPtrInst, 1> {
1144};
1145
// Out-of-line constructor (insert-before form).  `Values` counts the base
// pointer plus all indices; operands are co-allocated immediately before this
// object, so the first operand slot is op_end() - Values.  The result element
// type is derived by walking the indices through the source element type.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}
1157
// Out-of-line constructor (insert-at-end form).  Identical to the
// insert-before overload except the instruction is appended to the given
// basic block.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}
1169
1170DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1171
1172//===----------------------------------------------------------------------===//
1173// ICmpInst Class
1174//===----------------------------------------------------------------------===//
1175
1176/// This instruction compares its operands according to the predicate given
1177/// to the constructor. It only operates on integers or pointers. The operands
1178/// must be identical types.
1179/// Represent an integer comparison operator.
1180class ICmpInst: public CmpInst {
1181 void AssertOK() {
1182 assert(isIntPredicate() &&
1183 "Invalid ICmp predicate value");
1184 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1185 "Both operands to ICmp instruction are not of the same type!");
1186 // Check that the operands are the right type
1187 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1188 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1189 "Invalid operand types for ICmp instruction");
1190 }
1191
1192protected:
1193 // Note: Instruction needs to be a friend here to call cloneImpl.
1194 friend class Instruction;
1195
1196 /// Clone an identical ICmpInst
1197 ICmpInst *cloneImpl() const;
1198
1199public:
1200 /// Constructor with insert-before-instruction semantics.
1202 Instruction *InsertBefore, ///< Where to insert
1203 Predicate pred, ///< The predicate to use for the comparison
1204 Value *LHS, ///< The left-hand-side of the expression
1205 Value *RHS, ///< The right-hand-side of the expression
1206 const Twine &NameStr = "" ///< Name of the instruction
1207 ) : CmpInst(makeCmpResultType(LHS->getType()),
1208 Instruction::ICmp, pred, LHS, RHS, NameStr,
1209 InsertBefore) {
1210#ifndef NDEBUG
1211 AssertOK();
1212#endif
1213 }
1214
1215 /// Constructor with insert-at-end semantics.
1217 BasicBlock &InsertAtEnd, ///< Block to insert into.
1218 Predicate pred, ///< The predicate to use for the comparison
1219 Value *LHS, ///< The left-hand-side of the expression
1220 Value *RHS, ///< The right-hand-side of the expression
1221 const Twine &NameStr = "" ///< Name of the instruction
1222 ) : CmpInst(makeCmpResultType(LHS->getType()),
1223 Instruction::ICmp, pred, LHS, RHS, NameStr,
1224 &InsertAtEnd) {
1225#ifndef NDEBUG
1226 AssertOK();
1227#endif
1228 }
1229
1230 /// Constructor with no-insertion semantics
1232 Predicate pred, ///< The predicate to use for the comparison
1233 Value *LHS, ///< The left-hand-side of the expression
1234 Value *RHS, ///< The right-hand-side of the expression
1235 const Twine &NameStr = "" ///< Name of the instruction
1236 ) : CmpInst(makeCmpResultType(LHS->getType()),
1237 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1238#ifndef NDEBUG
1239 AssertOK();
1240#endif
1241 }
1242
1243 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1244 /// @returns the predicate that would be the result if the operand were
1245 /// regarded as signed.
1246 /// Return the signed version of the predicate
1248 return getSignedPredicate(getPredicate());
1249 }
1250
1251 /// This is a static version that you can use without an instruction.
1252 /// Return the signed version of the predicate.
1253 static Predicate getSignedPredicate(Predicate pred);
1254
1255 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1256 /// @returns the predicate that would be the result if the operand were
1257 /// regarded as unsigned.
1258 /// Return the unsigned version of the predicate
1260 return getUnsignedPredicate(getPredicate());
1261 }
1262
1263 /// This is a static version that you can use without an instruction.
1264 /// Return the unsigned version of the predicate.
1265 static Predicate getUnsignedPredicate(Predicate pred);
1266
1267 /// Return true if this predicate is either EQ or NE. This also
1268 /// tests for commutativity.
1269 static bool isEquality(Predicate P) {
1270 return P == ICMP_EQ || P == ICMP_NE;
1271 }
1272
1273 /// Return true if this predicate is either EQ or NE. This also
1274 /// tests for commutativity.
1275 bool isEquality() const {
1276 return isEquality(getPredicate());
1277 }
1278
1279 /// @returns true if the predicate of this ICmpInst is commutative
1280 /// Determine if this relation is commutative.
1281 bool isCommutative() const { return isEquality(); }
1282
1283 /// Return true if the predicate is relational (not EQ or NE).
1284 ///
1285 bool isRelational() const {
1286 return !isEquality();
1287 }
1288
1289 /// Return true if the predicate is relational (not EQ or NE).
1290 ///
1291 static bool isRelational(Predicate P) {
1292 return !isEquality(P);
1293 }
1294
1295 /// Return true if the predicate is SGT or UGT.
1296 ///
1297 static bool isGT(Predicate P) {
1298 return P == ICMP_SGT || P == ICMP_UGT;
1299 }
1300
1301 /// Return true if the predicate is SLT or ULT.
1302 ///
1303 static bool isLT(Predicate P) {
1304 return P == ICMP_SLT || P == ICMP_ULT;
1305 }
1306
1307 /// Return true if the predicate is SGE or UGE.
1308 ///
1309 static bool isGE(Predicate P) {
1310 return P == ICMP_SGE || P == ICMP_UGE;
1311 }
1312
1313 /// Return true if the predicate is SLE or ULE.
1314 ///
1315 static bool isLE(Predicate P) {
1316 return P == ICMP_SLE || P == ICMP_ULE;
1317 }
1318
1319 /// Returns the sequence of all ICmp predicates.
1320 ///
1321 static auto predicates() { return ICmpPredicates(); }
1322
1323 /// Exchange the two operands to this instruction in such a way that it does
1324 /// not modify the semantics of the instruction. The predicate value may be
1325 /// changed to retain the same result if the predicate is order dependent
1326 /// (e.g. ult).
1327 /// Swap operands and adjust predicate.
1329 setPredicate(getSwappedPredicate());
1330 Op<0>().swap(Op<1>());
1331 }
1332
1333 /// Return result of `LHS Pred RHS` comparison.
1334 static bool compare(const APInt &LHS, const APInt &RHS,
1335 ICmpInst::Predicate Pred);
1336
1337 // Methods for support type inquiry through isa, cast, and dyn_cast:
1338 static bool classof(const Instruction *I) {
1339 return I->getOpcode() == Instruction::ICmp;
1340 }
1341 static bool classof(const Value *V) {
1342 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1343 }
1344};
1345
1346//===----------------------------------------------------------------------===//
1347// FCmpInst Class
1348//===----------------------------------------------------------------------===//
1349
1350/// This instruction compares its operands according to the predicate given
1351/// to the constructor. It only operates on floating point values or packed
1352/// vectors of floating point values. The operands must be identical types.
1353/// Represents a floating point comparison operator.
1354class FCmpInst: public CmpInst {
1355 void AssertOK() {
1356 assert(isFPPredicate() && "Invalid FCmp predicate value");
1357 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1358 "Both operands to FCmp instruction are not of the same type!");
1359 // Check that the operands are the right type
1360 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1361 "Invalid operand types for FCmp instruction");
1362 }
1363
1364protected:
1365 // Note: Instruction needs to be a friend here to call cloneImpl.
1366 friend class Instruction;
1367
1368 /// Clone an identical FCmpInst
1369 FCmpInst *cloneImpl() const;
1370
1371public:
1372 /// Constructor with insert-before-instruction semantics.
1374 Instruction *InsertBefore, ///< Where to insert
1375 Predicate pred, ///< The predicate to use for the comparison
1376 Value *LHS, ///< The left-hand-side of the expression
1377 Value *RHS, ///< The right-hand-side of the expression
1378 const Twine &NameStr = "" ///< Name of the instruction
1380 Instruction::FCmp, pred, LHS, RHS, NameStr,
1381 InsertBefore) {
1382 AssertOK();
1383 }
1384
1385 /// Constructor with insert-at-end semantics.
1387 BasicBlock &InsertAtEnd, ///< Block to insert into.
1388 Predicate pred, ///< The predicate to use for the comparison
1389 Value *LHS, ///< The left-hand-side of the expression
1390 Value *RHS, ///< The right-hand-side of the expression
1391 const Twine &NameStr = "" ///< Name of the instruction
1393 Instruction::FCmp, pred, LHS, RHS, NameStr,
1394 &InsertAtEnd) {
1395 AssertOK();
1396 }
1397
1398 /// Constructor with no-insertion semantics
1400 Predicate Pred, ///< The predicate to use for the comparison
1401 Value *LHS, ///< The left-hand-side of the expression
1402 Value *RHS, ///< The right-hand-side of the expression
1403 const Twine &NameStr = "", ///< Name of the instruction
1404 Instruction *FlagsSource = nullptr
1405 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1406 RHS, NameStr, nullptr, FlagsSource) {
1407 AssertOK();
1408 }
1409
1410 /// @returns true if the predicate of this instruction is EQ or NE.
1411 /// Determine if this is an equality predicate.
1412 static bool isEquality(Predicate Pred) {
1413 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1414 Pred == FCMP_UNE;
1415 }
1416
1417 /// @returns true if the predicate of this instruction is EQ or NE.
1418 /// Determine if this is an equality predicate.
1419 bool isEquality() const { return isEquality(getPredicate()); }
1420
1421 /// @returns true if the predicate of this instruction is commutative.
1422 /// Determine if this is a commutative predicate.
1423 bool isCommutative() const {
1424 return isEquality() ||
1425 getPredicate() == FCMP_FALSE ||
1426 getPredicate() == FCMP_TRUE ||
1427 getPredicate() == FCMP_ORD ||
1429 }
1430
1431 /// @returns true if the predicate is relational (not EQ or NE).
1432 /// Determine if this a relational predicate.
1433 bool isRelational() const { return !isEquality(); }
1434
1435 /// Exchange the two operands to this instruction in such a way that it does
1436 /// not modify the semantics of the instruction. The predicate value may be
1437 /// changed to retain the same result if the predicate is order dependent
1438 /// (e.g. ult).
1439 /// Swap operands and adjust predicate.
1442 Op<0>().swap(Op<1>());
1443 }
1444
1445 /// Returns the sequence of all FCmp predicates.
1446 ///
1447 static auto predicates() { return FCmpPredicates(); }
1448
1449 /// Return result of `LHS Pred RHS` comparison.
1450 static bool compare(const APFloat &LHS, const APFloat &RHS,
1451 FCmpInst::Predicate Pred);
1452
1453 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1454 static bool classof(const Instruction *I) {
1455 return I->getOpcode() == Instruction::FCmp;
1456 }
1457 static bool classof(const Value *V) {
1458 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1459 }
1460};
1461
1462//===----------------------------------------------------------------------===//
1463/// This class represents a function call, abstracting a target
1464/// machine's calling convention. This class uses low bit of the SubClassData
1465/// field to indicate whether or not this is a tail call. The rest of the bits
1466/// hold the calling convention of the call.
1467///
1468class CallInst : public CallBase {
1469 CallInst(const CallInst &CI);
1470
1471 /// Construct a CallInst given a range of arguments.
1472 /// Construct a CallInst from a range of arguments
1473 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1474 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1475 Instruction *InsertBefore);
1476
1477 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1478 const Twine &NameStr, Instruction *InsertBefore)
1479 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1480
1481 /// Construct a CallInst given a range of arguments.
1482 /// Construct a CallInst from a range of arguments
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1485 BasicBlock *InsertAtEnd);
1486
1487 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1488 Instruction *InsertBefore);
1489
1490 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1491 BasicBlock *InsertAtEnd);
1492
1493 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1494 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1495 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1496
1497 /// Compute the number of operands to allocate.
1498 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1499 // We need one operand for the called function, plus the input operand
1500 // counts provided.
1501 return 1 + NumArgs + NumBundleInputs;
1502 }
1503
1504protected:
1505 // Note: Instruction needs to be a friend here to call cloneImpl.
1506 friend class Instruction;
1507
1508 CallInst *cloneImpl() const;
1509
1510public:
1511 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1512 Instruction *InsertBefore = nullptr) {
1513 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1514 }
1515
1517 const Twine &NameStr,
1518 Instruction *InsertBefore = nullptr) {
1519 return new (ComputeNumOperands(Args.size()))
1520 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1521 }
1522
1524 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1525 const Twine &NameStr = "",
1526 Instruction *InsertBefore = nullptr) {
1527 const int NumOperands =
1528 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1529 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1530
1531 return new (NumOperands, DescriptorBytes)
1532 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1533 }
1534
1535 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1536 BasicBlock *InsertAtEnd) {
1537 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1538 }
1539
1541 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1542 return new (ComputeNumOperands(Args.size()))
1543 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1544 }
1545
1548 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549 const int NumOperands =
1550 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1551 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1552
1553 return new (NumOperands, DescriptorBytes)
1554 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1555 }
1556
1557 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1558 Instruction *InsertBefore = nullptr) {
1559 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1560 InsertBefore);
1561 }
1562
1564 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1565 const Twine &NameStr = "",
1566 Instruction *InsertBefore = nullptr) {
1567 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1568 NameStr, InsertBefore);
1569 }
1570
1572 const Twine &NameStr,
1573 Instruction *InsertBefore = nullptr) {
1574 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1575 InsertBefore);
1576 }
1577
1578 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1579 BasicBlock *InsertAtEnd) {
1580 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1581 InsertAtEnd);
1582 }
1583
1585 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1586 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1587 InsertAtEnd);
1588 }
1589
1592 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1594 NameStr, InsertAtEnd);
1595 }
1596
1597 /// Create a clone of \p CI with a different set of operand bundles and
1598 /// insert it before \p InsertPt.
1599 ///
1600 /// The returned call instruction is identical \p CI in every way except that
1601 /// the operand bundles for the new instruction are set to the operand bundles
1602 /// in \p Bundles.
1604 Instruction *InsertPt = nullptr);
1605
1606 // Note that 'musttail' implies 'tail'.
1607 enum TailCallKind : unsigned {
1614
1616 static_assert(
1617 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1618 "Bitfields must be contiguous");
1619
1621 return getSubclassData<TailCallKindField>();
1622 }
1623
1624 bool isTailCall() const {
1626 return Kind == TCK_Tail || Kind == TCK_MustTail;
1627 }
1628
1629 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1630
1631 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1632
1634 setSubclassData<TailCallKindField>(TCK);
1635 }
1636
1637 void setTailCall(bool IsTc = true) {
1639 }
1640
1641 /// Return true if the call can return twice
1642 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1643 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1644
1645 // Methods for support type inquiry through isa, cast, and dyn_cast:
1646 static bool classof(const Instruction *I) {
1647 return I->getOpcode() == Instruction::Call;
1648 }
1649 static bool classof(const Value *V) {
1650 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1651 }
1652
1653 /// Updates profile metadata by scaling it by \p S / \p T.
1655
1656private:
1657 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1658 // method so that subclasses cannot accidentally use it.
1659 template <typename Bitfield>
1660 void setSubclassData(typename Bitfield::Type Value) {
1661 Instruction::setSubclassData<Bitfield>(Value);
1662 }
1663};
1664
// Out-of-line constructor (insert-at-end form).  Operand count is the callee
// plus the arguments plus all bundle inputs; operands are co-allocated
// immediately before this object, so the first operand slot is
// op_end() - count.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1675
// Out-of-line constructor (insert-before form).  Identical to the
// insert-at-end overload except for the insertion point.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1686
1687//===----------------------------------------------------------------------===//
1688// SelectInst Class
1689//===----------------------------------------------------------------------===//
1690
1691/// This class represents the LLVM 'select' instruction.
1692///
1693class SelectInst : public Instruction {
1694 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1695 Instruction *InsertBefore)
1696 : Instruction(S1->getType(), Instruction::Select,
1697 &Op<0>(), 3, InsertBefore) {
1698 init(C, S1, S2);
1699 setName(NameStr);
1700 }
1701
1702 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1703 BasicBlock *InsertAtEnd)
1704 : Instruction(S1->getType(), Instruction::Select,
1705 &Op<0>(), 3, InsertAtEnd) {
1706 init(C, S1, S2);
1707 setName(NameStr);
1708 }
1709
1710 void init(Value *C, Value *S1, Value *S2) {
1711 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1712 Op<0>() = C;
1713 Op<1>() = S1;
1714 Op<2>() = S2;
1715 }
1716
1717protected:
1718 // Note: Instruction needs to be a friend here to call cloneImpl.
1719 friend class Instruction;
1720
1721 SelectInst *cloneImpl() const;
1722
1723public:
1724 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1725 const Twine &NameStr = "",
1726 Instruction *InsertBefore = nullptr,
1727 Instruction *MDFrom = nullptr) {
1728 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1729 if (MDFrom)
1730 Sel->copyMetadata(*MDFrom);
1731 return Sel;
1732 }
1733
1734 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1735 const Twine &NameStr,
1736 BasicBlock *InsertAtEnd) {
1737 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1738 }
1739
1740 const Value *getCondition() const { return Op<0>(); }
1741 const Value *getTrueValue() const { return Op<1>(); }
1742 const Value *getFalseValue() const { return Op<2>(); }
1743 Value *getCondition() { return Op<0>(); }
1744 Value *getTrueValue() { return Op<1>(); }
1745 Value *getFalseValue() { return Op<2>(); }
1746
1747 void setCondition(Value *V) { Op<0>() = V; }
1748 void setTrueValue(Value *V) { Op<1>() = V; }
1749 void setFalseValue(Value *V) { Op<2>() = V; }
1750
1751 /// Swap the true and false values of the select instruction.
1752 /// This doesn't swap prof metadata.
1753 void swapValues() { Op<1>().swap(Op<2>()); }
1754
1755 /// Return a string if the specified operands are invalid
1756 /// for a select operation, otherwise return null.
1757 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1758
1759 /// Transparently provide more efficient getOperand methods.
1761
1763 return static_cast<OtherOps>(Instruction::getOpcode());
1764 }
1765
1766 // Methods for support type inquiry through isa, cast, and dyn_cast:
1767 static bool classof(const Instruction *I) {
1768 return I->getOpcode() == Instruction::Select;
1769 }
1770 static bool classof(const Value *V) {
1771 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1772 }
1773};
1774
// SelectInst always has exactly three operands: condition, true value,
// false value.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1778
1780
1781//===----------------------------------------------------------------------===//
1782// VAArgInst Class
1783//===----------------------------------------------------------------------===//
1784
1785/// This class represents the va_arg llvm instruction, which returns
1786/// an argument of the specified type given a va_list and increments that list
1787///
1789protected:
1790 // Note: Instruction needs to be a friend here to call cloneImpl.
1791 friend class Instruction;
1792
1793 VAArgInst *cloneImpl() const;
1794
1795public:
1796 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1797 Instruction *InsertBefore = nullptr)
1798 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1799 setName(NameStr);
1800 }
1801
1802 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1803 BasicBlock *InsertAtEnd)
1804 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1805 setName(NameStr);
1806 }
1807
1808 Value *getPointerOperand() { return getOperand(0); }
1809 const Value *getPointerOperand() const { return getOperand(0); }
1810 static unsigned getPointerOperandIndex() { return 0U; }
1811
1812 // Methods for support type inquiry through isa, cast, and dyn_cast:
1813 static bool classof(const Instruction *I) {
1814 return I->getOpcode() == VAArg;
1815 }
1816 static bool classof(const Value *V) {
1817 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1818 }
1819};
1820
1821//===----------------------------------------------------------------------===//
1822// ExtractElementInst Class
1823//===----------------------------------------------------------------------===//
1824
1825/// This instruction extracts a single (scalar)
1826/// element from a VectorType value
1827///
1829 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1830 Instruction *InsertBefore = nullptr);
1831 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1832 BasicBlock *InsertAtEnd);
1833
1834protected:
1835 // Note: Instruction needs to be a friend here to call cloneImpl.
1836 friend class Instruction;
1837
1839
1840public:
1842 const Twine &NameStr = "",
1843 Instruction *InsertBefore = nullptr) {
1844 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1845 }
1846
1848 const Twine &NameStr,
1849 BasicBlock *InsertAtEnd) {
1850 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1851 }
1852
1853 /// Return true if an extractelement instruction can be
1854 /// formed with the specified operands.
1855 static bool isValidOperands(const Value *Vec, const Value *Idx);
1856
1858 Value *getIndexOperand() { return Op<1>(); }
1859 const Value *getVectorOperand() const { return Op<0>(); }
1860 const Value *getIndexOperand() const { return Op<1>(); }
1861
1863 return cast<VectorType>(getVectorOperand()->getType());
1864 }
1865
1866 /// Transparently provide more efficient getOperand methods.
1868
1869 // Methods for support type inquiry through isa, cast, and dyn_cast:
1870 static bool classof(const Instruction *I) {
1871 return I->getOpcode() == Instruction::ExtractElement;
1872 }
1873 static bool classof(const Value *V) {
1874 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1875 }
1876};
1877
1878template <>
1880 public FixedNumOperandTraits<ExtractElementInst, 2> {
1881};
1882
1884
1885//===----------------------------------------------------------------------===//
1886// InsertElementInst Class
1887//===----------------------------------------------------------------------===//
1888
1889/// This instruction inserts a single (scalar)
1890/// element into a VectorType value
1891///
1893 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1894 const Twine &NameStr = "",
1895 Instruction *InsertBefore = nullptr);
1896 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1897 BasicBlock *InsertAtEnd);
1898
1899protected:
1900 // Note: Instruction needs to be a friend here to call cloneImpl.
1901 friend class Instruction;
1902
1903 InsertElementInst *cloneImpl() const;
1904
1905public:
1906 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1907 const Twine &NameStr = "",
1908 Instruction *InsertBefore = nullptr) {
1909 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1910 }
1911
1912 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1913 const Twine &NameStr,
1914 BasicBlock *InsertAtEnd) {
1915 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1916 }
1917
1918 /// Return true if an insertelement instruction can be
1919 /// formed with the specified operands.
1920 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1921 const Value *Idx);
1922
1923 /// Overload to return most specific vector type.
1924 ///
1926 return cast<VectorType>(Instruction::getType());
1927 }
1928
1929 /// Transparently provide more efficient getOperand methods.
1931
1932 // Methods for support type inquiry through isa, cast, and dyn_cast:
1933 static bool classof(const Instruction *I) {
1934 return I->getOpcode() == Instruction::InsertElement;
1935 }
1936 static bool classof(const Value *V) {
1937 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1938 }
1939};
1940
1941template <>
1943 public FixedNumOperandTraits<InsertElementInst, 3> {
1944};
1945
1947
1948//===----------------------------------------------------------------------===//
1949// ShuffleVectorInst Class
1950//===----------------------------------------------------------------------===//
1951
1952constexpr int PoisonMaskElem = -1;
1953
1954/// This instruction constructs a fixed permutation of two
1955/// input vectors.
1956///
1957/// For each element of the result vector, the shuffle mask selects an element
1958/// from one of the input vectors to copy to the result. Non-negative elements
1959/// in the mask represent an index into the concatenated pair of input vectors.
1960/// PoisonMaskElem (-1) specifies that the result element is poison.
1961///
1962/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1963/// requirement may be relaxed in the future.
1965 SmallVector<int, 4> ShuffleMask;
1966 Constant *ShuffleMaskForBitcode;
1967
1968protected:
1969 // Note: Instruction needs to be a friend here to call cloneImpl.
1970 friend class Instruction;
1971
1972 ShuffleVectorInst *cloneImpl() const;
1973
1974public:
1975 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1976 Instruction *InsertBefore = nullptr);
1977 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
1978 BasicBlock *InsertAtEnd);
1979 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
1980 Instruction *InsertBefore = nullptr);
1981 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
1982 BasicBlock *InsertAtEnd);
1983 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1984 const Twine &NameStr = "",
1985 Instruction *InsertBefor = nullptr);
1986 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1987 const Twine &NameStr, BasicBlock *InsertAtEnd);
1989 const Twine &NameStr = "",
1990 Instruction *InsertBefor = nullptr);
1992 const Twine &NameStr, BasicBlock *InsertAtEnd);
1993
1994 void *operator new(size_t S) { return User::operator new(S, 2); }
1995 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
1996
1997 /// Swap the operands and adjust the mask to preserve the semantics
1998 /// of the instruction.
1999 void commute();
2000
2001 /// Return true if a shufflevector instruction can be
2002 /// formed with the specified operands.
2003 static bool isValidOperands(const Value *V1, const Value *V2,
2004 const Value *Mask);
2005 static bool isValidOperands(const Value *V1, const Value *V2,
2006 ArrayRef<int> Mask);
2007
2008 /// Overload to return most specific vector type.
2009 ///
2011 return cast<VectorType>(Instruction::getType());
2012 }
2013
2014 /// Transparently provide more efficient getOperand methods.
2016
2017 /// Return the shuffle mask value of this instruction for the given element
2018 /// index. Return PoisonMaskElem if the element is undef.
2019 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2020
2021 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2022 /// elements of the mask are returned as PoisonMaskElem.
2023 static void getShuffleMask(const Constant *Mask,
2024 SmallVectorImpl<int> &Result);
2025
2026 /// Return the mask for this instruction as a vector of integers. Undefined
2027 /// elements of the mask are returned as PoisonMaskElem.
2029 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2030 }
2031
2032 /// Return the mask for this instruction, for use in bitcode.
2033 ///
2034 /// TODO: This is temporary until we decide a new bitcode encoding for
2035 /// shufflevector.
2036 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2037
2038 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2039 Type *ResultTy);
2040
2041 void setShuffleMask(ArrayRef<int> Mask);
2042
2043 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2044
2045 /// Return true if this shuffle returns a vector with a different number of
2046 /// elements than its source vectors.
2047 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2048 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2049 bool changesLength() const {
2050 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2051 ->getElementCount()
2052 .getKnownMinValue();
2053 unsigned NumMaskElts = ShuffleMask.size();
2054 return NumSourceElts != NumMaskElts;
2055 }
2056
2057 /// Return true if this shuffle returns a vector with a greater number of
2058 /// elements than its source vectors.
2059 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2060 bool increasesLength() const {
2061 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2062 ->getElementCount()
2063 .getKnownMinValue();
2064 unsigned NumMaskElts = ShuffleMask.size();
2065 return NumSourceElts < NumMaskElts;
2066 }
2067
2068 /// Return true if this shuffle mask chooses elements from exactly one source
2069 /// vector.
2070 /// Example: <7,5,undef,7>
2071 /// This assumes that vector operands are the same length as the mask.
2072 static bool isSingleSourceMask(ArrayRef<int> Mask);
2073 static bool isSingleSourceMask(const Constant *Mask) {
2074 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2075 SmallVector<int, 16> MaskAsInts;
2076 getShuffleMask(Mask, MaskAsInts);
2077 return isSingleSourceMask(MaskAsInts);
2078 }
2079
2080 /// Return true if this shuffle chooses elements from exactly one source
2081 /// vector without changing the length of that vector.
2082 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2083 /// TODO: Optionally allow length-changing shuffles.
2084 bool isSingleSource() const {
2085 return !changesLength() && isSingleSourceMask(ShuffleMask);
2086 }
2087
2088 /// Return true if this shuffle mask chooses elements from exactly one source
2089 /// vector without lane crossings. A shuffle using this mask is not
2090 /// necessarily a no-op because it may change the number of elements from its
2091 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2092 /// Example: <undef,undef,2,3>
2093 static bool isIdentityMask(ArrayRef<int> Mask);
2094 static bool isIdentityMask(const Constant *Mask) {
2095 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2096
2097 // Not possible to express a shuffle mask for a scalable vector for this
2098 // case.
2099 if (isa<ScalableVectorType>(Mask->getType()))
2100 return false;
2101
2102 SmallVector<int, 16> MaskAsInts;
2103 getShuffleMask(Mask, MaskAsInts);
2104 return isIdentityMask(MaskAsInts);
2105 }
2106
2107 /// Return true if this shuffle chooses elements from exactly one source
2108 /// vector without lane crossings and does not change the number of elements
2109 /// from its input vectors.
2110 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2111 bool isIdentity() const {
2112 // Not possible to express a shuffle mask for a scalable vector for this
2113 // case.
2114 if (isa<ScalableVectorType>(getType()))
2115 return false;
2116
2117 return !changesLength() && isIdentityMask(ShuffleMask);
2118 }
2119
2120 /// Return true if this shuffle lengthens exactly one source vector with
2121 /// undefs in the high elements.
2122 bool isIdentityWithPadding() const;
2123
2124 /// Return true if this shuffle extracts the first N elements of exactly one
2125 /// source vector.
2126 bool isIdentityWithExtract() const;
2127
2128 /// Return true if this shuffle concatenates its 2 source vectors. This
/// returns false if either input is undefined. In that case, the shuffle is
/// better classified as an identity with padding operation.
2131 bool isConcat() const;
2132
2133 /// Return true if this shuffle mask chooses elements from its source vectors
2134 /// without lane crossings. A shuffle using this mask would be
2135 /// equivalent to a vector select with a constant condition operand.
2136 /// Example: <4,1,6,undef>
2137 /// This returns false if the mask does not choose from both input vectors.
2138 /// In that case, the shuffle is better classified as an identity shuffle.
2139 /// This assumes that vector operands are the same length as the mask
2140 /// (a length-changing shuffle can never be equivalent to a vector select).
2141 static bool isSelectMask(ArrayRef<int> Mask);
2142 static bool isSelectMask(const Constant *Mask) {
2143 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2144 SmallVector<int, 16> MaskAsInts;
2145 getShuffleMask(Mask, MaskAsInts);
2146 return isSelectMask(MaskAsInts);
2147 }
2148
2149 /// Return true if this shuffle chooses elements from its source vectors
2150 /// without lane crossings and all operands have the same number of elements.
2151 /// In other words, this shuffle is equivalent to a vector select with a
2152 /// constant condition operand.
2153 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2154 /// This returns false if the mask does not choose from both input vectors.
2155 /// In that case, the shuffle is better classified as an identity shuffle.
2156 /// TODO: Optionally allow length-changing shuffles.
2157 bool isSelect() const {
2158 return !changesLength() && isSelectMask(ShuffleMask);
2159 }
2160
2161 /// Return true if this shuffle mask swaps the order of elements from exactly
2162 /// one source vector.
2163 /// Example: <7,6,undef,4>
2164 /// This assumes that vector operands are the same length as the mask.
2165 static bool isReverseMask(ArrayRef<int> Mask);
2166 static bool isReverseMask(const Constant *Mask) {
2167 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2168 SmallVector<int, 16> MaskAsInts;
2169 getShuffleMask(Mask, MaskAsInts);
2170 return isReverseMask(MaskAsInts);
2171 }
2172
2173 /// Return true if this shuffle swaps the order of elements from exactly
2174 /// one source vector.
2175 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2176 /// TODO: Optionally allow length-changing shuffles.
2177 bool isReverse() const {
2178 return !changesLength() && isReverseMask(ShuffleMask);
2179 }
2180
2181 /// Return true if this shuffle mask chooses all elements with the same value
2182 /// as the first element of exactly one source vector.
2183 /// Example: <4,undef,undef,4>
2184 /// This assumes that vector operands are the same length as the mask.
2185 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2186 static bool isZeroEltSplatMask(const Constant *Mask) {
2187 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2188 SmallVector<int, 16> MaskAsInts;
2189 getShuffleMask(Mask, MaskAsInts);
2190 return isZeroEltSplatMask(MaskAsInts);
2191 }
2192
2193 /// Return true if all elements of this shuffle are the same value as the
2194 /// first element of exactly one source vector without changing the length
2195 /// of that vector.
2196 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2197 /// TODO: Optionally allow length-changing shuffles.
2198 /// TODO: Optionally allow splats from other elements.
2199 bool isZeroEltSplat() const {
2200 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2201 }
2202
2203 /// Return true if this shuffle mask is a transpose mask.
2204 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2205 /// even- or odd-numbered vector elements from two n-dimensional source
2206 /// vectors and write each result into consecutive elements of an
2207 /// n-dimensional destination vector. Two shuffles are necessary to complete
2208 /// the transpose, one for the even elements and another for the odd elements.
2209 /// This description closely follows how the TRN1 and TRN2 AArch64
2210 /// instructions operate.
2211 ///
2212 /// For example, a simple 2x2 matrix can be transposed with:
2213 ///
2214 /// ; Original matrix
2215 /// m0 = < a, b >
2216 /// m1 = < c, d >
2217 ///
2218 /// ; Transposed matrix
2219 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2220 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2221 ///
2222 /// For matrices having greater than n columns, the resulting nx2 transposed
2223 /// matrix is stored in two result vectors such that one vector contains
2224 /// interleaved elements from all the even-numbered rows and the other vector
2225 /// contains interleaved elements from all the odd-numbered rows. For example,
2226 /// a 2x4 matrix can be transposed with:
2227 ///
2228 /// ; Original matrix
2229 /// m0 = < a, b, c, d >
2230 /// m1 = < e, f, g, h >
2231 ///
2232 /// ; Transposed matrix
2233 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2234 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2235 static bool isTransposeMask(ArrayRef<int> Mask);
2236 static bool isTransposeMask(const Constant *Mask) {
2237 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2238 SmallVector<int, 16> MaskAsInts;
2239 getShuffleMask(Mask, MaskAsInts);
2240 return isTransposeMask(MaskAsInts);
2241 }
2242
2243 /// Return true if this shuffle transposes the elements of its inputs without
2244 /// changing the length of the vectors. This operation may also be known as a
2245 /// merge or interleave. See the description for isTransposeMask() for the
2246 /// exact specification.
2247 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2248 bool isTranspose() const {
2249 return !changesLength() && isTransposeMask(ShuffleMask);
2250 }
2251
2252 /// Return true if this shuffle mask is a splice mask, concatenating the two
2253 /// inputs together and then extracts an original width vector starting from
2254 /// the splice index.
2255 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2256 static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2257 static bool isSpliceMask(const Constant *Mask, int &Index) {
2258 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2259 SmallVector<int, 16> MaskAsInts;
2260 getShuffleMask(Mask, MaskAsInts);
2261 return isSpliceMask(MaskAsInts, Index);
2262 }
2263
2264 /// Return true if this shuffle splices two inputs without changing the length
2265 /// of the vectors. This operation concatenates the two inputs together and
2266 /// then extracts an original width vector starting from the splice index.
2267 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2268 bool isSplice(int &Index) const {
2269 return !changesLength() && isSpliceMask(ShuffleMask, Index);
2270 }
2271
2272 /// Return true if this shuffle mask is an extract subvector mask.
2273 /// A valid extract subvector mask returns a smaller vector from a single
2274 /// source operand. The base extraction index is returned as well.
2275 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2276 int &Index);
2277 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2278 int &Index) {
2279 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2280 // Not possible to express a shuffle mask for a scalable vector for this
2281 // case.
2282 if (isa<ScalableVectorType>(Mask->getType()))
2283 return false;
2284 SmallVector<int, 16> MaskAsInts;
2285 getShuffleMask(Mask, MaskAsInts);
2286 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2287 }
2288
2289 /// Return true if this shuffle mask is an extract subvector mask.
2291 // Not possible to express a shuffle mask for a scalable vector for this
2292 // case.
2293 if (isa<ScalableVectorType>(getType()))
2294 return false;
2295
2296 int NumSrcElts =
2297 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2298 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2299 }
2300
2301 /// Return true if this shuffle mask is an insert subvector mask.
2302 /// A valid insert subvector mask inserts the lowest elements of a second
2303 /// source operand into an in-place first source operand.
2304 /// Both the sub vector width and the insertion index is returned.
2305 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2306 int &NumSubElts, int &Index);
2307 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2308 int &NumSubElts, int &Index) {
2309 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2310 // Not possible to express a shuffle mask for a scalable vector for this
2311 // case.
2312 if (isa<ScalableVectorType>(Mask->getType()))
2313 return false;
2314 SmallVector<int, 16> MaskAsInts;
2315 getShuffleMask(Mask, MaskAsInts);
2316 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2317 }
2318
2319 /// Return true if this shuffle mask is an insert subvector mask.
2320 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2321 // Not possible to express a shuffle mask for a scalable vector for this
2322 // case.
2323 if (isa<ScalableVectorType>(getType()))
2324 return false;
2325
2326 int NumSrcElts =
2327 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2328 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2329 }
2330
2331 /// Return true if this shuffle mask replicates each of the \p VF elements
2332 /// in a vector \p ReplicationFactor times.
2333 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2334 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2335 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2336 int &VF);
2337 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2338 int &VF) {
2339 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2340 // Not possible to express a shuffle mask for a scalable vector for this
2341 // case.
2342 if (isa<ScalableVectorType>(Mask->getType()))
2343 return false;
2344 SmallVector<int, 16> MaskAsInts;
2345 getShuffleMask(Mask, MaskAsInts);
2346 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2347 }
2348
2349 /// Return true if this shuffle mask is a replication mask.
2350 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2351
2352 /// Return true if this shuffle mask represents "clustered" mask of size VF,
2353 /// i.e. each index between [0..VF) is used exactly once in each submask of
2354 /// size VF.
2355 /// For example, the mask for \p VF=4 is:
2356 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2357 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2358 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2359 /// element 3 is used twice in the second submask
2360 /// (3,3,1,0) and index 2 is not used at all.
2361 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2362
2363 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2364 /// mask.
2365 bool isOneUseSingleSourceMask(int VF) const;
2366
2367 /// Change values in a shuffle permute mask assuming the two vector operands
2368 /// of length InVecNumElts have swapped position.
2370 unsigned InVecNumElts) {
2371 for (int &Idx : Mask) {
2372 if (Idx == -1)
2373 continue;
2374 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2375 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2376 "shufflevector mask index out of range");
2377 }
2378 }
2379
2380 /// Return if this shuffle interleaves its two input vectors together.
2381 bool isInterleave(unsigned Factor);
2382
2383 /// Return true if the mask interleaves one or more input vectors together.
2384 ///
2385 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2386 /// E.g. For a Factor of 2 (LaneLen=4):
2387 /// <0, 4, 1, 5, 2, 6, 3, 7>
2388 /// E.g. For a Factor of 3 (LaneLen=4):
2389 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2390 /// E.g. For a Factor of 4 (LaneLen=2):
2391 /// <0, 2, 6, 4, 1, 3, 7, 5>
2392 ///
2393 /// NumInputElts is the total number of elements in the input vectors.
2394 ///
2395 /// StartIndexes are the first indexes of each vector being interleaved,
2396 /// substituting any indexes that were undef
2397 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2398 ///
2399 /// Note that this does not check if the input vectors are consecutive:
2400 /// It will return true for masks such as
2401 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2402 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2403 unsigned NumInputElts,
2404 SmallVectorImpl<unsigned> &StartIndexes);
2405 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2406 unsigned NumInputElts) {
2407 SmallVector<unsigned, 8> StartIndexes;
2408 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2409 }
2410
2411 /// Checks if the shuffle is a bit rotation of the first operand across
2412 /// multiple subelements, e.g:
2413 ///
2414 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2415 ///
2416 /// could be expressed as
2417 ///
2418 /// rotl <4 x i16> %a, 8
2419 ///
2420 /// If it can be expressed as a rotation, returns the number of subelements to
2421 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2422 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2423 unsigned MinSubElts, unsigned MaxSubElts,
2424 unsigned &NumSubElts, unsigned &RotateAmt);
2425
2426 // Methods for support type inquiry through isa, cast, and dyn_cast:
2427 static bool classof(const Instruction *I) {
2428 return I->getOpcode() == Instruction::ShuffleVector;
2429 }
2430 static bool classof(const Value *V) {
2431 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2432 }
2433};
2434
2435template <>
2437 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2438
2440
2441//===----------------------------------------------------------------------===//
2442// ExtractValueInst Class
2443//===----------------------------------------------------------------------===//
2444
2445/// This instruction extracts a struct member or array
2446/// element value from an aggregate value.
2447///
2450
2452
/// Constructors - Create an extractvalue instruction with a base aggregate
2454 /// value and a list of indices. The first ctor can optionally insert before
2455 /// an existing instruction, the second appends the new instruction to the
2456 /// specified BasicBlock.
2457 inline ExtractValueInst(Value *Agg,
2458 ArrayRef<unsigned> Idxs,
2459 const Twine &NameStr,
2460 Instruction *InsertBefore);
2461 inline ExtractValueInst(Value *Agg,
2462 ArrayRef<unsigned> Idxs,
2463 const Twine &NameStr, BasicBlock *InsertAtEnd);
2464
2465 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2466
2467protected:
2468 // Note: Instruction needs to be a friend here to call cloneImpl.
2469 friend class Instruction;
2470
2471 ExtractValueInst *cloneImpl() const;
2472
2473public:
2475 ArrayRef<unsigned> Idxs,
2476 const Twine &NameStr = "",
2477 Instruction *InsertBefore = nullptr) {
2478 return new
2479 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2480 }
2481
2483 ArrayRef<unsigned> Idxs,
2484 const Twine &NameStr,
2485 BasicBlock *InsertAtEnd) {
2486 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2487 }
2488
2489 /// Returns the type of the element that would be extracted
2490 /// with an extractvalue instruction with the specified parameters.
2491 ///
2492 /// Null is returned if the indices are invalid for the specified type.
2493 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2494
2495 using idx_iterator = const unsigned*;
2496
2497 inline idx_iterator idx_begin() const { return Indices.begin(); }
2498 inline idx_iterator idx_end() const { return Indices.end(); }
2500 return make_range(idx_begin(), idx_end());
2501 }
2502
2504 return getOperand(0);
2505 }
2507 return getOperand(0);
2508 }
2509 static unsigned getAggregateOperandIndex() {
2510 return 0U; // get index for modifying correct operand
2511 }
2512
2514 return Indices;
2515 }
2516
2517 unsigned getNumIndices() const {
2518 return (unsigned)Indices.size();
2519 }
2520
2521 bool hasIndices() const {
2522 return true;
2523 }
2524
2525 // Methods for support type inquiry through isa, cast, and dyn_cast:
2526 static bool classof(const Instruction *I) {
2527 return I->getOpcode() == Instruction::ExtractValue;
2528 }
2529 static bool classof(const Value *V) {
2530 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2531 }
2532};
2533
2534ExtractValueInst::ExtractValueInst(Value *Agg,
2535 ArrayRef<unsigned> Idxs,
2536 const Twine &NameStr,
2537 Instruction *InsertBefore)
2538 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2539 ExtractValue, Agg, InsertBefore) {
2540 init(Idxs, NameStr);
2541}
2542
2543ExtractValueInst::ExtractValueInst(Value *Agg,
2544 ArrayRef<unsigned> Idxs,
2545 const Twine &NameStr,
2546 BasicBlock *InsertAtEnd)
2547 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2548 ExtractValue, Agg, InsertAtEnd) {
2549 init(Idxs, NameStr);
2550}
2551
2552//===----------------------------------------------------------------------===//
2553// InsertValueInst Class
2554//===----------------------------------------------------------------------===//
2555
/// This instruction inserts a struct field or array element
2557/// value into an aggregate value.
2558///
2561
2562 InsertValueInst(const InsertValueInst &IVI);
2563
/// Constructors - Create an insertvalue instruction with a base aggregate
2565 /// value, a value to insert, and a list of indices. The first ctor can
2566 /// optionally insert before an existing instruction, the second appends
2567 /// the new instruction to the specified BasicBlock.
2568 inline InsertValueInst(Value *Agg, Value *Val,
2569 ArrayRef<unsigned> Idxs,
2570 const Twine &NameStr,
2571 Instruction *InsertBefore);
2572 inline InsertValueInst(Value *Agg, Value *Val,
2573 ArrayRef<unsigned> Idxs,
2574 const Twine &NameStr, BasicBlock *InsertAtEnd);
2575
2576 /// Constructors - These two constructors are convenience methods because one
2577 /// and two index insertvalue instructions are so common.
2578 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2579 const Twine &NameStr = "",
2580 Instruction *InsertBefore = nullptr);
2581 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2582 BasicBlock *InsertAtEnd);
2583
2584 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2585 const Twine &NameStr);
2586
2587protected:
2588 // Note: Instruction needs to be a friend here to call cloneImpl.
2589 friend class Instruction;
2590
2591 InsertValueInst *cloneImpl() const;
2592
2593public:
2594 // allocate space for exactly two operands
2595 void *operator new(size_t S) { return User::operator new(S, 2); }
2596 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2597
2598 static InsertValueInst *Create(Value *Agg, Value *Val,
2599 ArrayRef<unsigned> Idxs,
2600 const Twine &NameStr = "",
2601 Instruction *InsertBefore = nullptr) {
2602 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2603 }
2604
2605 static InsertValueInst *Create(Value *Agg, Value *Val,
2606 ArrayRef<unsigned> Idxs,
2607 const Twine &NameStr,
2608 BasicBlock *InsertAtEnd) {
2609 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2610 }
2611
2612 /// Transparently provide more efficient getOperand methods.
2614
2615 using idx_iterator = const unsigned*;
2616
2617 inline idx_iterator idx_begin() const { return Indices.begin(); }
2618 inline idx_iterator idx_end() const { return Indices.end(); }
2620 return make_range(idx_begin(), idx_end());
2621 }
2622
2624 return getOperand(0);
2625 }
2627 return getOperand(0);
2628 }
2629 static unsigned getAggregateOperandIndex() {
2630 return 0U; // get index for modifying correct operand
2631 }
2632
2634 return getOperand(1);
2635 }
2637 return getOperand(1);
2638 }
2640 return 1U; // get index for modifying correct operand
2641 }
2642
2644 return Indices;
2645 }
2646
2647 unsigned getNumIndices() const {
2648 return (unsigned)Indices.size();
2649 }
2650
2651 bool hasIndices() const {
2652 return true;
2653 }
2654
2655 // Methods for support type inquiry through isa, cast, and dyn_cast:
2656 static bool classof(const Instruction *I) {
2657 return I->getOpcode() == Instruction::InsertValue;
2658 }
2659 static bool classof(const Value *V) {
2660 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2661 }
2662};
2663
2664template <>
2666 public FixedNumOperandTraits<InsertValueInst, 2> {
2667};
2668
2669InsertValueInst::InsertValueInst(Value *Agg,
2670 Value *Val,
2671 ArrayRef<unsigned> Idxs,
2672 const Twine &NameStr,
2673 Instruction *InsertBefore)
2674 : Instruction(Agg->getType(), InsertValue,
2675 OperandTraits<InsertValueInst>::op_begin(this),
2676 2, InsertBefore) {
2677 init(Agg, Val, Idxs, NameStr);
2678}
2679
2680InsertValueInst::InsertValueInst(Value *Agg,
2681 Value *Val,
2682 ArrayRef<unsigned> Idxs,
2683 const Twine &NameStr,
2684 BasicBlock *InsertAtEnd)
2685 : Instruction(Agg->getType(), InsertValue,
2686 OperandTraits<InsertValueInst>::op_begin(this),
2687 2, InsertAtEnd) {
2688 init(Agg, Val, Idxs, NameStr);
2689}
2690
2691DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2692
2693//===----------------------------------------------------------------------===//
2694// PHINode Class
2695//===----------------------------------------------------------------------===//
2696
2697// PHINode - The PHINode class is used to represent the magical mystical PHI
2698// node, that can not exist in nature, but can be synthesized in a computer
2699// scientist's overactive imagination.
2700//
2701class PHINode : public Instruction {
2702 /// The number of operands actually allocated. NumOperands is
2703 /// the number actually in use.
2704 unsigned ReservedSpace;
2705
2706 PHINode(const PHINode &PN);
2707
2708 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2709 const Twine &NameStr = "",
2710 Instruction *InsertBefore = nullptr)
2711 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2712 ReservedSpace(NumReservedValues) {
2713 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2714 setName(NameStr);
2715 allocHungoffUses(ReservedSpace);
2716 }
2717
2718 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2719 BasicBlock *InsertAtEnd)
2720 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2721 ReservedSpace(NumReservedValues) {
2722 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2723 setName(NameStr);
2724 allocHungoffUses(ReservedSpace);
2725 }
2726
2727protected:
2728 // Note: Instruction needs to be a friend here to call cloneImpl.
2729 friend class Instruction;
2730
2731 PHINode *cloneImpl() const;
2732
2733 // allocHungoffUses - this is more complicated than the generic
2734 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2735 // values and pointers to the incoming blocks, all in one allocation.
2736 void allocHungoffUses(unsigned N) {
2737 User::allocHungoffUses(N, /* IsPhi */ true);
2738 }
2739
2740public:
2741 /// Constructors - NumReservedValues is a hint for the number of incoming
2742 /// edges that this phi node will have (use 0 if you really have no idea).
2743 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2744 const Twine &NameStr = "",
2745 Instruction *InsertBefore = nullptr) {
2746 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2747 }
2748
2749 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2750 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2751 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2752 }
2753
2754 /// Provide fast operand accessors
2756
2757 // Block iterator interface. This provides access to the list of incoming
2758 // basic blocks, which parallels the list of incoming values.
2759 // Please note that we are not providing non-const iterators for blocks to
2760 // force all updates go through an interface function.
2761
2764
2766 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2767 }
2768
2770 return block_begin() + getNumOperands();
2771 }
2772
2774 return make_range(block_begin(), block_end());
2775 }
2776
2777 op_range incoming_values() { return operands(); }
2778
2779 const_op_range incoming_values() const { return operands(); }
2780
2781 /// Return the number of incoming edges
2782 ///
2783 unsigned getNumIncomingValues() const { return getNumOperands(); }
2784
2785 /// Return incoming value number x
2786 ///
2787 Value *getIncomingValue(unsigned i) const {
2788 return getOperand(i);
2789 }
2790 void setIncomingValue(unsigned i, Value *V) {
2791 assert(V && "PHI node got a null value!");
2792 assert(getType() == V->getType() &&
2793 "All operands to PHI node must be the same type as the PHI node!");
2794 setOperand(i, V);
2795 }
2796
2797 static unsigned getOperandNumForIncomingValue(unsigned i) {
2798 return i;
2799 }
2800
2801 static unsigned getIncomingValueNumForOperand(unsigned i) {
2802 return i;
2803 }
2804
2805 /// Return incoming basic block number @p i.
2806 ///
2807 BasicBlock *getIncomingBlock(unsigned i) const {
2808 return block_begin()[i];
2809 }
2810
2811 /// Return incoming basic block corresponding
2812 /// to an operand of the PHI.
2813 ///
2815 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2816 return getIncomingBlock(unsigned(&U - op_begin()));
2817 }
2818
2819 /// Return incoming basic block corresponding
2820 /// to value use iterator.
2821 ///
2823 return getIncomingBlock(I.getUse());
2824 }
2825
/// Set incoming basic block number \p i.
/// Non-const block iterators are deliberately not exposed (all updates must
/// go through interface functions like this one), so we const_cast our own
/// storage here to perform the write.
2826 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2827 const_cast<block_iterator>(block_begin())[i] = BB;
2828 }
2829
2830 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2831 /// of this PHINode, starting at \p ToIdx.
2833 uint32_t ToIdx = 0) {
2834 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2835 }
2836
2837 /// Replace every incoming basic block \p Old to basic block \p New.
2839 assert(New && Old && "PHI node got a null basic block!");
2840 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2841 if (getIncomingBlock(Op) == Old)
2842 setIncomingBlock(Op, New);
2843 }
2844
2845 /// Add an incoming value to the end of the PHI list
2846 ///
2848 if (getNumOperands() == ReservedSpace)
2849 growOperands(); // Get more space!
2850 // Initialize some new operands.
2851 setNumHungOffUseOperands(getNumOperands() + 1);
2852 setIncomingValue(getNumOperands() - 1, V);
2853 setIncomingBlock(getNumOperands() - 1, BB);
2854 }
2855
2856 /// Remove an incoming value. This is useful if a
2857 /// predecessor basic block is deleted. The value removed is returned.
2858 ///
2859 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2860 /// is true), the PHI node is destroyed and any uses of it are replaced with
2861 /// dummy values. The only time there should be zero incoming values to a PHI
2862 /// node is when the block is dead, so this strategy is sound.
2863 ///
2864 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2865
/// Convenience overload: remove the incoming value for predecessor \p BB.
/// Removes only the FIRST entry for \p BB (getBasicBlockIndex returns the
/// first matching index); asserts that \p BB is actually a predecessor.
/// See the index-based overload above for the DeletePHIIfEmpty semantics.
2866 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2867 int Idx = getBasicBlockIndex(BB);
2868 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2869 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2870 }
2871
2872 /// Remove all incoming values for which the predicate returns true.
2873 /// The predicate accepts the incoming value index.
2874 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2875 bool DeletePHIIfEmpty = true);
2876
2877 /// Return the first index of the specified basic
2878 /// block in the value list for this PHI. Returns -1 if no instance.
2879 ///
2880 int getBasicBlockIndex(const BasicBlock *BB) const {
2881 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2882 if (block_begin()[i] == BB)
2883 return i;
2884 return -1;
2885 }
2886
2888 int Idx = getBasicBlockIndex(BB);
2889 assert(Idx >= 0 && "Invalid basic block argument!");
2890 return getIncomingValue(Idx);
2891 }
2892
2893 /// Set every incoming value(s) for block \p BB to \p V.
2895 assert(BB && "PHI node got a null basic block!");
2896 bool Found = false;
2897 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2898 if (getIncomingBlock(Op) == BB) {
2899 Found = true;
2900 setIncomingValue(Op, V);
2901 }
2902 (void)Found;
2903 assert(Found && "Invalid basic block argument to set!");
2904 }
2905
2906 /// If the specified PHI node always merges together the
2907 /// same value, return the value, otherwise return null.
2908 Value *hasConstantValue() const;
2909
2910 /// Whether the specified PHI node always merges
2911 /// together the same value, assuming undefs are equal to a unique
2912 /// non-undef value.
2913 bool hasConstantOrUndefValue() const;
2914
2915 /// If the PHI node is complete which means all of its parent's predecessors
2916 /// have incoming value in this PHI, return true, otherwise return false.
2917 bool isComplete() const {
2919 [this](const BasicBlock *Pred) {
2920 return getBasicBlockIndex(Pred) >= 0;
2921 });
2922 }
2923
2924 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2925 static bool classof(const Instruction *I) {
2926 return I->getOpcode() == Instruction::PHI;
2927 }
2928 static bool classof(const Value *V) {
2929 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2930 }
2931
2932private:
2933 void growOperands();
2934};
2935
2936template <>
2938};
2939
2941
2942//===----------------------------------------------------------------------===//
2943// LandingPadInst Class
2944//===----------------------------------------------------------------------===//
2945
2946//===---------------------------------------------------------------------------
2947/// The landingpad instruction holds all of the information
2948/// necessary to generate correct exception handling. The landingpad instruction
2949/// cannot be moved from the top of a landing pad block, which itself is
2950/// accessible only from the 'unwind' edge of an invoke. This uses the
2951/// SubclassData field in Value to store whether or not the landingpad is a
2952/// cleanup.
2953///
2955 using CleanupField = BoolBitfieldElementT<0>;
2956
2957 /// The number of operands actually allocated. NumOperands is
2958 /// the number actually in use.
2959 unsigned ReservedSpace;
2960
2961 LandingPadInst(const LandingPadInst &LP);
2962
2963public:
2965
2966private:
2967 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2968 const Twine &NameStr, Instruction *InsertBefore);
2969 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2970 const Twine &NameStr, BasicBlock *InsertAtEnd);
2971
2972 // Allocate space for exactly zero operands.
2973 void *operator new(size_t S) { return User::operator new(S); }
2974
2975 void growOperands(unsigned Size);
2976 void init(unsigned NumReservedValues, const Twine &NameStr);
2977
2978protected:
2979 // Note: Instruction needs to be a friend here to call cloneImpl.
2980 friend class Instruction;
2981
2982 LandingPadInst *cloneImpl() const;
2983
2984public:
2985 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2986
2987 /// Constructors - NumReservedClauses is a hint for the number of incoming
2988 /// clauses that this landingpad will have (use 0 if you really have no idea).
2989 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2990 const Twine &NameStr = "",
2991 Instruction *InsertBefore = nullptr);
2992 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2993 const Twine &NameStr, BasicBlock *InsertAtEnd);
2994
2995 /// Provide fast operand accessors
2997
2998 /// Return 'true' if this landingpad instruction is a
2999 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3000 /// doesn't catch the exception.
3001 bool isCleanup() const { return getSubclassData<CleanupField>(); }
3002
3003 /// Indicate that this landingpad instruction is a cleanup.
3004 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3005
3006 /// Add a catch or filter clause to the landing pad.
3007 void addClause(Constant *ClauseVal);
3008
3009 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3010 /// determine what type of clause this is.
/// Clauses are stored as this instruction's operands (getNumClauses() ==
/// getNumOperands()), so this is a checked cast of operand \p Idx.
3011 Constant *getClause(unsigned Idx) const {
3012 return cast<Constant>(getOperandList()[Idx]);
3013 }
3014
3015 /// Return 'true' if the clause and index Idx is a catch clause.
/// A clause is distinguished purely by the type of its value: filter
/// clauses have array type (see isFilter), so any non-array clause is a
/// catch clause.
3016 bool isCatch(unsigned Idx) const {
3017 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3018 }
3019
3020 /// Return 'true' if the clause and index Idx is a filter clause.
/// Exact complement of isCatch: filter clauses are recognized by their
/// value having array type.
3021 bool isFilter(unsigned Idx) const {
3022 return isa<ArrayType>(getOperandList()[Idx]->getType());
3023 }
3024
3025 /// Get the number of clauses for this landing pad.
3026 unsigned getNumClauses() const { return getNumOperands(); }
3027
3028 /// Grow the size of the operand list to accommodate the new
3029 /// number of clauses.
3030 void reserveClauses(unsigned Size) { growOperands(Size); }
3031
3032 // Methods for support type inquiry through isa, cast, and dyn_cast:
3033 static bool classof(const Instruction *I) {
3034 return I->getOpcode() == Instruction::LandingPad;
3035 }
3036 static bool classof(const Value *V) {
3037 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3038 }
3039};
3040
3041template <>
3043};
3044
3046
3047//===----------------------------------------------------------------------===//
3048// ReturnInst Class
3049//===----------------------------------------------------------------------===//
3050
3051//===---------------------------------------------------------------------------
3052/// Return a value (possibly void), from a function. Execution
3053/// does not continue in this function any longer.
3054///
3055class ReturnInst : public Instruction {
3056 ReturnInst(const ReturnInst &RI);
3057
3058private:
3059 // ReturnInst constructors:
3060 // ReturnInst() - 'ret void' instruction
3061 // ReturnInst( null) - 'ret void' instruction
3062 // ReturnInst(Value* X) - 'ret X' instruction
3063 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3064 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3065 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3066 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3067 //
3068 // NOTE: If the Value* passed is of type void then the constructor behaves as
3069 // if it was passed NULL.
3070 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3071 Instruction *InsertBefore = nullptr);
3072 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3073 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3074
3075protected:
3076 // Note: Instruction needs to be a friend here to call cloneImpl.
3077 friend class Instruction;
3078
3079 ReturnInst *cloneImpl() const;
3080
3081public:
3082 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3083 Instruction *InsertBefore = nullptr) {
3084 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3085 }
3086
3088 BasicBlock *InsertAtEnd) {
3089 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3090 }
3091
3092 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3093 return new(0) ReturnInst(C, InsertAtEnd);
3094 }
3095
3096 /// Provide fast operand accessors
3098
3099 /// Convenience accessor. Returns null if there is no return value.
3101 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3102 }
3103
3104 unsigned getNumSuccessors() const { return 0; }
3105
3106 // Methods for support type inquiry through isa, cast, and dyn_cast:
3107 static bool classof(const Instruction *I) {
3108 return (I->getOpcode() == Instruction::Ret);
3109 }
3110 static bool classof(const Value *V) {
3111 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3112 }
3113
3114private:
// A 'ret' terminator has zero successors (getNumSuccessors() returns 0).
// These accessors exist only to satisfy the terminator successor interface
// and are kept private and unreachable so they can never be called.
3115 BasicBlock *getSuccessor(unsigned idx) const {
3116 llvm_unreachable("ReturnInst has no successors!");
3117 }
3118
3119 void setSuccessor(unsigned idx, BasicBlock *B) {
3120 llvm_unreachable("ReturnInst has no successors!");
3121 }
3122};
3123
3124template <>
3125struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3126};
3127
3129
3130//===----------------------------------------------------------------------===//
3131// BranchInst Class
3132//===----------------------------------------------------------------------===//
3133
3134//===---------------------------------------------------------------------------
3135/// Conditional or Unconditional Branch instruction.
3136///
3137class BranchInst : public Instruction {
3138 /// Ops list - Branches are strange. The operands are ordered:
3139 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3140 /// they don't have to check for cond/uncond branchness. These are mostly
3141 /// accessed relative from op_end().
3142 BranchInst(const BranchInst &BI);
3143 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3144 // BranchInst(BB *B) - 'br B'
3145 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3146 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3147 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3148 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3149 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3150 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3151 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3152 Instruction *InsertBefore = nullptr);
3153 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3154 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3155 BasicBlock *InsertAtEnd);
3156
3157 void AssertOK();
3158
3159protected:
3160 // Note: Instruction needs to be a friend here to call cloneImpl.
3161 friend class Instruction;
3162
3163 BranchInst *cloneImpl() const;
3164
3165public:
3166 /// Iterator type that casts an operand to a basic block.
3167 ///
3168 /// This only makes sense because the successors are stored as adjacent
3169 /// operands for branch instructions.
3171 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3172 std::random_access_iterator_tag, BasicBlock *,
3173 ptrdiff_t, BasicBlock *, BasicBlock *> {
3175
3176 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3177 BasicBlock *operator->() const { return operator*(); }
3178 };
3179
3180 /// The const version of `succ_op_iterator`.
3182 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3183 std::random_access_iterator_tag,
3184 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3185 const BasicBlock *> {
3188
3189 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3190 const BasicBlock *operator->() const { return operator*(); }
3191 };
3192
3194 Instruction *InsertBefore = nullptr) {
3195 return new(1) BranchInst(IfTrue, InsertBefore);
3196 }
3197
3198 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3199 Value *Cond, Instruction *InsertBefore = nullptr) {
3200 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3201 }
3202
3203 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3204 return new(1) BranchInst(IfTrue, InsertAtEnd);
3205 }
3206
3207 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3208 Value *Cond, BasicBlock *InsertAtEnd) {
3209 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3210 }
3211
3212 /// Transparently provide more efficient getOperand methods.
3214
// Operand counts encode the branch kind: an unconditional branch stores just
// [TrueDest] (1 operand); a conditional one stores [Cond, FalseDest, TrueDest]
// (3 operands). See the operand-layout comment at the top of the class.
3215 bool isUnconditional() const { return getNumOperands() == 1; }
3216 bool isConditional() const { return getNumOperands() == 3; }
3217
3219 assert(isConditional() && "Cannot get condition of an uncond branch!");
3220 return Op<-3>();
3221 }
3222
3224 assert(isConditional() && "Cannot set condition of unconditional branch!");
3225 Op<-3>() = V;
3226 }
3227
3228 unsigned getNumSuccessors() const { return 1+isConditional(); }
3229
/// Return successor \p i. Operands are ordered [Cond, FalseDest,] TrueDest
/// and accessed relative to op_end(), so successor 0 (TrueDest) is Op<-1>
/// and successor 1 (FalseDest, conditional branches only) is Op<-2>.
3230 BasicBlock *getSuccessor(unsigned i) const {
3231 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3232 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3233 }
3234
/// Replace successor \p idx with \p NewSucc, using the same from-the-end
/// operand indexing as getSuccessor.
3235 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3236 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3237 *(&Op<-1>() - idx) = NewSucc;
3238 }
3239
3240 /// Swap the successors of this branch instruction.
3241 ///
3242 /// Swaps the successors of the branch instruction. This also swaps any
3243 /// branch weight metadata associated with the instruction so that it
3244 /// continues to map correctly to each operand.
3245 void swapSuccessors();
3246
3248 return make_range(
3249 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3250 succ_op_iterator(value_op_end()));
3251 }
3252
3255 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3256 const_succ_op_iterator(value_op_end()));
3257 }
3258
3259 // Methods for support type inquiry through isa, cast, and dyn_cast:
3260 static bool classof(const Instruction *I) {
3261 return (I->getOpcode() == Instruction::Br);
3262 }
3263 static bool classof(const Value *V) {
3264 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3265 }
3266};
3267
3268template <>
3269struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3270};
3271
3273
3274//===----------------------------------------------------------------------===//
3275// SwitchInst Class
3276//===----------------------------------------------------------------------===//
3277
3278//===---------------------------------------------------------------------------
3279/// Multiway switch
3280///
3281class SwitchInst : public Instruction {
3282 unsigned ReservedSpace;
3283
3284 // Operand[0] = Value to switch on
3285 // Operand[1] = Default basic block destination
3286 // Operand[2n ] = Value to match
3287 // Operand[2n+1] = BasicBlock to go to on match
3288 SwitchInst(const SwitchInst &SI);
3289
3290 /// Create a new switch instruction, specifying a value to switch on and a
3291 /// default destination. The number of additional cases can be specified here
3292 /// to make memory allocation more efficient. This constructor can also
3293 /// auto-insert before another instruction.
3294 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3295 Instruction *InsertBefore);
3296
3297 /// Create a new switch instruction, specifying a value to switch on and a
3298 /// default destination. The number of additional cases can be specified here
3299 /// to make memory allocation more efficient. This constructor also
3300 /// auto-inserts at the end of the specified BasicBlock.
3301 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3302 BasicBlock *InsertAtEnd);
3303
3304 // allocate space for exactly zero operands
3305 void *operator new(size_t S) { return User::operator new(S); }
3306
3307 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3308 void growOperands();
3309
3310protected:
3311 // Note: Instruction needs to be a friend here to call cloneImpl.
3312 friend class Instruction;
3313
3314 SwitchInst *cloneImpl() const;
3315
3316public:
3317 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3318
3319 // -2
3320 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3321
3322 template <typename CaseHandleT> class CaseIteratorImpl;
3323
3324 /// A handle to a particular switch case. It exposes a convenient interface
3325 /// to both the case value and the successor block.
3326 ///
3327 /// We define this as a template and instantiate it to form both a const and
3328 /// non-const handle.
3329 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3331 // Directly befriend both const and non-const iterators.
3332 friend class SwitchInst::CaseIteratorImpl<
3333 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3334
3335 protected:
3336 // Expose the switch type we're parameterized with to the iterator.
3337 using SwitchInstType = SwitchInstT;
3338
3339 SwitchInstT *SI;
3341
3342 CaseHandleImpl() = default;
3344
3345 public:
3346 /// Resolves case value for current case.
3347 ConstantIntT *getCaseValue() const {
3348 assert((unsigned)Index < SI->getNumCases() &&
3349 "Index out the number of cases.");
3350 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3351 }
3352
3353 /// Resolves successor for current case.
3354 BasicBlockT *getCaseSuccessor() const {
3355 assert(((unsigned)Index < SI->getNumCases() ||
3356 (unsigned)Index == DefaultPseudoIndex) &&
3357 "Index out the number of cases.");
3358 return SI->getSuccessor(getSuccessorIndex());
3359 }
3360
3361 /// Returns number of current case.
3362 unsigned getCaseIndex() const { return Index; }
3363
3364 /// Returns successor index for current case successor.
/// Maps case indices to SwitchInst successor indices: the default
/// pseudo-case maps to successor 0 (the default destination), and real case
/// \p Index maps to successor Index + 1. This mirrors the operand layout
/// used by SwitchInst::getSuccessor.
3365 unsigned getSuccessorIndex() const {
3366 assert(((unsigned)Index == DefaultPseudoIndex ||
3367 (unsigned)Index < SI->getNumCases()) &&
3368 "Index out the number of cases.");
3369 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3370 }
3371
3372 bool operator==(const CaseHandleImpl &RHS) const {
3373 assert(SI == RHS.SI && "Incompatible operators.");
3374 return Index == RHS.Index;
3375 }
3376 };
3377
3380
3382 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3384
3385 public:
3387
3388 /// Sets the new value for current case.
3389 void setValue(ConstantInt *V) const {
3390 assert((unsigned)Index < SI->getNumCases() &&
3391 "Index out the number of cases.");
3392 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3393 }
3394
3395 /// Sets the new successor for current case.
3396 void setSuccessor(BasicBlock *S) const {
3397 SI->setSuccessor(getSuccessorIndex(), S);
3398 }
3399 };
3400
3401 template <typename CaseHandleT>
3403 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3404 std::random_access_iterator_tag,
3405 const CaseHandleT> {
3406 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3407
3408 CaseHandleT Case;
3409
3410 public:
3411 /// Default constructed iterator is in an invalid state until assigned to
3412 /// a case for a particular switch.
3413 CaseIteratorImpl() = default;
3414
3415 /// Initializes case iterator for given SwitchInst and for given
3416 /// case number.
3417 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3418
3419 /// Initializes case iterator for given SwitchInst and for given
3420 /// successor index.
3422 unsigned SuccessorIndex) {
3423 assert(SuccessorIndex < SI->getNumSuccessors() &&
3424 "Successor index # out of range!");
3425 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3426 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3427 }
3428
3429 /// Support converting to the const variant. This will be a no-op for const
3430 /// variant.
3432 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3433 }
3434
3436 // Check index correctness after addition.
3437 // Note: Index == getNumCases() means end().
3438 assert(Case.Index + N >= 0 &&
3439 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3440 "Case.Index out the number of cases.");
3441 Case.Index += N;
3442 return *this;
3443 }
3445 // Check index correctness after subtraction.
3446 // Note: Case.Index == getNumCases() means end().
3447 assert(Case.Index - N >= 0 &&
3448 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3449 "Case.Index out the number of cases.");
3450 Case.Index -= N;
3451 return *this;
3452 }
3454 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3455 return Case.Index - RHS.Case.Index;
3456 }
3457 bool operator==(const CaseIteratorImpl &RHS) const {
3458 return Case == RHS.Case;
3459 }
3460 bool operator<(const CaseIteratorImpl &RHS) const {
3461 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3462 return Case.Index < RHS.Case.Index;
3463 }
3464 const CaseHandleT &operator*() const { return Case; }
3465 };
3466
3469
3471 unsigned NumCases,
3472 Instruction *InsertBefore = nullptr) {
3473 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3474 }
3475
3477 unsigned NumCases, BasicBlock *InsertAtEnd) {
3478 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3479 }
3480
3481 /// Provide fast operand accessors
3483
3484 // Accessor Methods for Switch stmt
// The switched-on value is always operand 0 (see the operand-layout comment
// at the top of the class).
3485 Value *getCondition() const { return getOperand(0); }
3486 void setCondition(Value *V) { setOperand(0, V); }
3487
3489 return cast<BasicBlock>(getOperand(1));
3490 }
3491
3492 void setDefaultDest(BasicBlock *DefaultCase) {
3493 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3494 }
3495
3496 /// Return the number of 'cases' in this switch instruction, excluding the
3497 /// default case.
/// Operand layout is (condition, default dest) followed by one
/// (value, dest) pair per case, hence getNumOperands()/2 - 1.
3498 unsigned getNumCases() const {
3499 return getNumOperands()/2 - 1;
3500 }
3501
3502 /// Returns a read/write iterator that points to the first case in the
3503 /// SwitchInst.
3505 return CaseIt(this, 0);
3506 }
3507
3508 /// Returns a read-only iterator that points to the first case in the
3509 /// SwitchInst.
3511 return ConstCaseIt(this, 0);
3512 }
3513
3514 /// Returns a read/write iterator that points one past the last in the
3515 /// SwitchInst.
3517 return CaseIt(this, getNumCases());
3518 }
3519
3520 /// Returns a read-only iterator that points one past the last in the
3521 /// SwitchInst.
3523 return ConstCaseIt(this, getNumCases());
3524 }
3525
3526 /// Iteration adapter for range-for loops.
3528 return make_range(case_begin(), case_end());
3529 }
3530
3531 /// Constant iteration adapter for range-for loops.
3533 return make_range(case_begin(), case_end());
3534 }
3535
3536 /// Returns an iterator that points to the default case.
3537 /// Note: this iterator allows to resolve successor only. Attempt
3538 /// to resolve case value causes an assertion.
3539 /// Also note, that increment and decrement also causes an assertion and
3540 /// makes iterator invalid.
3542 return CaseIt(this, DefaultPseudoIndex);
3543 }
3545 return ConstCaseIt(this, DefaultPseudoIndex);
3546 }
3547
3548 /// Search all of the case values for the specified constant. If it is
3549 /// explicitly handled, return the case iterator of it, otherwise return
3550 /// default case iterator to indicate that it is handled by the default
3551 /// handler.
3553 return CaseIt(
3554 this,
3555 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3556 }
3558 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3559 return Case.getCaseValue() == C;
3560 });
3561 if (I != case_end())
3562 return I;
3563
3564 return case_default();
3565 }
3566
3567 /// Finds the unique case value for a given successor. Returns null if the
3568 /// successor is not found, not unique, or is the default case.
3570 if (BB == getDefaultDest())
3571 return nullptr;
3572
3573 ConstantInt *CI = nullptr;
3574 for (auto Case : cases()) {
3575 if (Case.getCaseSuccessor() != BB)
3576 continue;
3577
3578 if (CI)
3579 return nullptr; // Multiple cases lead to BB.
3580
3581 CI = Case.getCaseValue();
3582 }
3583
3584 return CI;
3585 }
3586
3587 /// Add an entry to the switch instruction.
3588 /// Note:
3589 /// This action invalidates case_end(). Old case_end() iterator will
3590 /// point to the added case.
3591 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3592
3593 /// This method removes the specified case and its successor from the switch
3594 /// instruction. Note that this operation may reorder the remaining cases at
3595 /// index idx and above.
3596 /// Note:
3597 /// This action invalidates iterators for all cases following the one removed,
3598 /// including the case_end() iterator. It returns an iterator for the next
3599 /// case.
3600 CaseIt removeCase(CaseIt I);
3601
// Successors: index 0 is the default destination (operand 1); successor
// idx >= 1 is the destination of case idx-1, stored at operand 2*idx+1
// (each case occupies an operand pair: value at 2n, destination at 2n+1).
3602 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3603 BasicBlock *getSuccessor(unsigned idx) const {
3604 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3605 return cast<BasicBlock>(getOperand(idx*2+1));
3606 }
3607 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3608 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3609 setOperand(idx * 2 + 1, NewSucc);
3610 }
3611
3612 // Methods for support type inquiry through isa, cast, and dyn_cast:
3613 static bool classof(const Instruction *I) {
3614 return I->getOpcode() == Instruction::Switch;
3615 }
3616 static bool classof(const Value *V) {
3617 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3618 }
3619};
3620
3621/// A wrapper class to simplify modification of SwitchInst cases along with
3622/// their prof branch_weights metadata.
3624 SwitchInst &SI;
3625 std::optional<SmallVector<uint32_t, 8>> Weights;
3626 bool Changed = false;
3627
3628protected:
3630
3631 void init();
3632
3633public:
3634 using CaseWeightOpt = std::optional<uint32_t>;
3635 SwitchInst *operator->() { return &SI; }
3636 SwitchInst &operator*() { return SI; }
3637 operator SwitchInst *() { return &SI; }
3638
3640
3642 if (Changed)
3643 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3644 }
3645
3646 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3647 /// correspondent branch weight.
3649
3650 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3651 /// specified branch weight for the added case.
3652 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3653
3654 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3655 /// this object to not touch the underlying SwitchInst in destructor.
3657
3658 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3659 CaseWeightOpt getSuccessorWeight(unsigned idx);
3660
3661 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3662};
3663
3664template <>
3666};
3667
3669
3670//===----------------------------------------------------------------------===//
3671// IndirectBrInst Class
3672//===----------------------------------------------------------------------===//
3673
3674//===---------------------------------------------------------------------------
3675/// Indirect Branch Instruction.
3676///
3678 unsigned ReservedSpace;
3679
3680 // Operand[0] = Address to jump to
3681 // Operand[n+1] = n-th destination
3682 IndirectBrInst(const IndirectBrInst &IBI);
3683
3684 /// Create a new indirectbr instruction, specifying an
3685 /// Address to jump to. The number of expected destinations can be specified
3686 /// here to make memory allocation more efficient. This constructor can also
3687 /// autoinsert before another instruction.
3688 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3689
3690 /// Create a new indirectbr instruction, specifying an
3691 /// Address to jump to. The number of expected destinations can be specified
3692 /// here to make memory allocation more efficient. This constructor also
3693 /// autoinserts at the end of the specified BasicBlock.
3694 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3695
3696 // allocate space for exactly zero operands
3697 void *operator new(size_t S) { return User::operator new(S); }
3698
3699 void init(Value *Address, unsigned NumDests);
3700 void growOperands();
3701
3702protected:
3703 // Note: Instruction needs to be a friend here to call cloneImpl.
3704 friend class Instruction;
3705
3706 IndirectBrInst *cloneImpl() const;
3707
3708public:
3709 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3710
3711 /// Iterator type that casts an operand to a basic block.
3712 ///
3713 /// This only makes sense because the successors are stored as adjacent
3714 /// operands for indirectbr instructions.
3716 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3717 std::random_access_iterator_tag, BasicBlock *,
3718 ptrdiff_t, BasicBlock *, BasicBlock *> {
3720
3721 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3722 BasicBlock *operator->() const { return operator*(); }
3723 };
3724
3725 /// The const version of `succ_op_iterator`.
3727 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3728 std::random_access_iterator_tag,
3729 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3730 const BasicBlock *> {
3733
3734 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3735 const BasicBlock *operator->() const { return operator*(); }
3736 };
3737
3738 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3739 Instruction *InsertBefore = nullptr) {
3740 return new IndirectBrInst(Address, NumDests, InsertBefore);
3741 }
3742
3743 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3744 BasicBlock *InsertAtEnd) {
3745 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3746 }
3747
3748 /// Provide fast operand accessors.
3750
3751 // Accessor Methods for IndirectBrInst instruction.
3752 Value *getAddress() { return getOperand(0); }
3753 const Value *getAddress() const { return getOperand(0); }
3754 void setAddress(Value *V) { setOperand(0, V); }
3755
3756 /// return the number of possible destinations in this
3757 /// indirectbr instruction.
3758 unsigned getNumDestinations() const { return getNumOperands()-1; }
3759
3760 /// Return the specified destination.
3761 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3762 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3763
3764 /// Add a destination.
3765 ///
3766 void addDestination(BasicBlock *Dest);
3767
3768 /// This method removes the specified successor from the
3769 /// indirectbr instruction.
3770 void removeDestination(unsigned i);
3771
3772 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3773 BasicBlock *getSuccessor(unsigned i) const {
3774 return cast<BasicBlock>(getOperand(i+1));
3775 }
3776 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3777 setOperand(i + 1, NewSucc);
3778 }
3779
3781 return make_range(succ_op_iterator(std::next(value_op_begin())),
3782 succ_op_iterator(value_op_end()));
3783 }
3784
3786 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3787 const_succ_op_iterator(value_op_end()));
3788 }
3789
3790 // Methods for support type inquiry through isa, cast, and dyn_cast:
3791 static bool classof(const Instruction *I) {
3792 return I->getOpcode() == Instruction::IndirectBr;
3793 }
3794 static bool classof(const Value *V) {
3795 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3796 }
3797};
3798
3799template <>
3801};
3802
3804
3805//===----------------------------------------------------------------------===//
3806// InvokeInst Class
3807//===----------------------------------------------------------------------===//
3808
3809/// Invoke instruction. The SubclassData field is used to hold the
3810/// calling convention of the call.
3811///
3812class InvokeInst : public CallBase {
3813 /// The number of operands for this call beyond the called function,
3814 /// arguments, and operand bundles.
3815 static constexpr int NumExtraOperands = 2;
3816
3817 /// The index from the end of the operand array to the normal destination.
3818 static constexpr int NormalDestOpEndIdx = -3;
3819
3820 /// The index from the end of the operand array to the unwind destination.
3821 static constexpr int UnwindDestOpEndIdx = -2;
3822
3823 InvokeInst(const InvokeInst &BI);
3824
3825 /// Construct an InvokeInst given a range of arguments.
3826 ///
3827 /// Construct an InvokeInst from a range of arguments
3828 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3829 BasicBlock *IfException, ArrayRef<Value *> Args,
3830 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3831 const Twine &NameStr, Instruction *InsertBefore);
3832
3833 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3834 BasicBlock *IfException, ArrayRef<Value *> Args,
3835 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3836 const Twine &NameStr, BasicBlock *InsertAtEnd);
3837
3838 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3839 BasicBlock *IfException, ArrayRef<Value *> Args,
3840 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3841
3842 /// Compute the number of operands to allocate.
3843 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3844 // We need one operand for the called function, plus our extra operands and
3845 // the input operand counts provided.
3846 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3847 }
3848
3849protected:
3850 // Note: Instruction needs to be a friend here to call cloneImpl.
3851 friend class Instruction;
3852
3853 InvokeInst *cloneImpl() const;
3854
3855public:
3856 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3857 BasicBlock *IfException, ArrayRef<Value *> Args,
3858 const Twine &NameStr,
3859 Instruction *InsertBefore = nullptr) {
3860 int NumOperands = ComputeNumOperands(Args.size());
3861 return new (NumOperands)
3862 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3863 NumOperands, NameStr, InsertBefore);
3864 }
3865
3866 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3867 BasicBlock *IfException, ArrayRef<Value *> Args,
3868 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3869 const Twine &NameStr = "",
3870 Instruction *InsertBefore = nullptr) {
3871 int NumOperands =
3872 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3873 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3874
3875 return new (NumOperands, DescriptorBytes)
3876 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3877 NameStr, InsertBefore);
3878 }
3879
3880 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3881 BasicBlock *IfException, ArrayRef<Value *> Args,
3882 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3883 int NumOperands = ComputeNumOperands(Args.size());
3884 return new (NumOperands)
3885 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3886 NumOperands, NameStr, InsertAtEnd);
3887 }
3888
3889 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3890 BasicBlock *IfException, ArrayRef<Value *> Args,
3892 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3893 int NumOperands =
3894 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3895 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3896
3897 return new (NumOperands, DescriptorBytes)
3898 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3899 NameStr, InsertAtEnd);
3900 }
3901
3903 BasicBlock *IfException, ArrayRef<Value *> Args,
3904 const Twine &NameStr,
3905 Instruction *InsertBefore = nullptr) {
3906 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3907 IfException, Args, std::nullopt, NameStr, InsertBefore);
3908 }
3909
3911 BasicBlock *IfException, ArrayRef<Value *> Args,
3912 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3913 const Twine &NameStr = "",
3914 Instruction *InsertBefore = nullptr) {
3915 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3916 IfException, Args, Bundles, NameStr, InsertBefore);
3917 }
3918
3920 BasicBlock *IfException, ArrayRef<Value *> Args,
3921 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3922 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3923 IfException, Args, NameStr, InsertAtEnd);
3924 }
3925
3927 BasicBlock *IfException, ArrayRef<Value *> Args,
3929 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3930 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3931 IfException, Args, Bundles, NameStr, InsertAtEnd);
3932 }
3933
3934 /// Create a clone of \p II with a different set of operand bundles and
3935 /// insert it before \p InsertPt.
3936 ///
3937 /// The returned invoke instruction is identical to \p II in every way except
3938 /// that the operand bundles for the new instruction are set to the operand
3939 /// bundles in \p Bundles.
3940 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3941 Instruction *InsertPt = nullptr);
3942
3943 // get*Dest - Return the destination basic blocks...
3945 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3946 }
3948 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3949 }
3951 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3952 }
3954 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3955 }
3956
3957 /// Get the landingpad instruction from the landing pad
3958 /// block (the unwind destination).
3959 LandingPadInst *getLandingPadInst() const;
3960
3961 BasicBlock *getSuccessor(unsigned i) const {
3962 assert(i < 2 && "Successor # out of range for invoke!");
3963 return i == 0 ? getNormalDest() : getUnwindDest();
3964 }
3965
3966 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3967 assert(i < 2 && "Successor # out of range for invoke!");
3968 if (i == 0)
3969 setNormalDest(NewSucc);
3970 else
3971 setUnwindDest(NewSucc);
3972 }
3973
3974 unsigned getNumSuccessors() const { return 2; }
3975
3976 // Methods for support type inquiry through isa, cast, and dyn_cast:
3977 static bool classof(const Instruction *I) {
3978 return (I->getOpcode() == Instruction::Invoke);
3979 }
3980 static bool classof(const Value *V) {
3981 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3982 }
3983
3984private:
3985 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3986 // method so that subclasses cannot accidentally use it.
3987 template <typename Bitfield>
3988 void setSubclassData(typename Bitfield::Type Value) {
3989 Instruction::setSubclassData<Bitfield>(Value);
3990 }
3991};
3992
3993InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3994 BasicBlock *IfException, ArrayRef<Value *> Args,
3995 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3996 const Twine &NameStr, Instruction *InsertBefore)
3997 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3998 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3999 InsertBefore) {
4000 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4001}
4002
4003InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4004 BasicBlock *IfException, ArrayRef<Value *> Args,
4005 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4006 const Twine &NameStr, BasicBlock *InsertAtEnd)
4007 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4008 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4009 InsertAtEnd) {
4010 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4011}
4012
4013//===----------------------------------------------------------------------===//
4014// CallBrInst Class
4015//===----------------------------------------------------------------------===//
4016
4017/// CallBr instruction, tracking function calls that may not return control but
4018/// instead transfer it to a third location. The SubclassData field is used to
4019/// hold the calling convention of the call.
4020///
4021class CallBrInst : public CallBase {
4022
4023 unsigned NumIndirectDests;
4024
4025 CallBrInst(const CallBrInst &BI);
4026
4027 /// Construct a CallBrInst given a range of arguments.
4028 ///
4029 /// Construct a CallBrInst from a range of arguments
4030 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4031 ArrayRef<BasicBlock *> IndirectDests,
4032 ArrayRef<Value *> Args,
4033 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4034 const Twine &NameStr, Instruction *InsertBefore);
4035
4036 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4037 ArrayRef<BasicBlock *> IndirectDests,
4038 ArrayRef<Value *> Args,
4039 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4040 const Twine &NameStr, BasicBlock *InsertAtEnd);
4041
4042 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4043 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4044 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4045
4046 /// Compute the number of operands to allocate.
4047 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4048 int NumBundleInputs = 0) {
4049 // We need one operand for the called function, plus our extra operands and
4050 // the input operand counts provided.
4051 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4052 }
4053
4054protected:
4055 // Note: Instruction needs to be a friend here to call cloneImpl.
4056 friend class Instruction;
4057
4058 CallBrInst *cloneImpl() const;
4059
4060public:
4062 BasicBlock *DefaultDest,
4063 ArrayRef<BasicBlock *> IndirectDests,
4064 ArrayRef<Value *> Args, const Twine &NameStr,
4065 Instruction *InsertBefore = nullptr) {
4066 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4067 return new (NumOperands)
4068 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4069 NumOperands, NameStr, InsertBefore);
4070 }
4071
4072 static CallBrInst *
4073 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4074 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4075 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4076 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4077 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4078 CountBundleInputs(Bundles));
4079 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4080
4081 return new (NumOperands, DescriptorBytes)
4082 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4083 NumOperands, NameStr, InsertBefore);
4084 }
4085
4087 BasicBlock *DefaultDest,
4088 ArrayRef<BasicBlock *> IndirectDests,
4089 ArrayRef<Value *> Args, const Twine &NameStr,
4090 BasicBlock *InsertAtEnd) {
4091 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4092 return new (NumOperands)
4093 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4094 NumOperands, NameStr, InsertAtEnd);
4095 }
4096
4098 BasicBlock *DefaultDest,
4099 ArrayRef<BasicBlock *> IndirectDests,
4100 ArrayRef<Value *> Args,
4102 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4103 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4104 CountBundleInputs(Bundles));
4105 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4106
4107 return new (NumOperands, DescriptorBytes)
4108 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4109 NumOperands, NameStr, InsertAtEnd);
4110 }
4111
4112 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4113 ArrayRef<BasicBlock *> IndirectDests,
4114 ArrayRef<Value *> Args, const Twine &NameStr,
4115 Instruction *InsertBefore = nullptr) {
4116 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4117 IndirectDests, Args, NameStr, InsertBefore);
4118 }
4119
4120 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4121 ArrayRef<BasicBlock *> IndirectDests,
4122 ArrayRef<Value *> Args,
4123 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4124 const Twine &NameStr = "",
4125 Instruction *InsertBefore = nullptr) {
4126 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4127 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4128 }
4129
4130 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4131 ArrayRef<BasicBlock *> IndirectDests,
4132 ArrayRef<Value *> Args, const Twine &NameStr,
4133 BasicBlock *InsertAtEnd) {
4134 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4135 IndirectDests, Args, NameStr, InsertAtEnd);
4136 }
4137
4139 BasicBlock *DefaultDest,
4140 ArrayRef<BasicBlock *> IndirectDests,
4141 ArrayRef<Value *> Args,
4143 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4144 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4145 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4146 }
4147
4148 /// Create a clone of \p CBI with a different set of operand bundles and
4149 /// insert it before \p InsertPt.
4150 ///
4151 /// The returned callbr instruction is identical to \p CBI in every way
4152 /// except that the operand bundles for the new instruction are set to the
4153 /// operand bundles in \p Bundles.
4154 static CallBrInst *Create(CallBrInst *CBI,
4156 Instruction *InsertPt = nullptr);
4157
4158 /// Return the number of callbr indirect dest labels.
4159 ///
4160 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4161
4162 /// getIndirectDestLabel - Return the i-th indirect dest label.
4163 ///
4164 Value *getIndirectDestLabel(unsigned i) const {
4165 assert(i < getNumIndirectDests() && "Out of bounds!");
4166 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4167 }
4168
4169 Value *getIndirectDestLabelUse(unsigned i) const {
4170 assert(i < getNumIndirectDests() && "Out of bounds!");
4171 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4172 }
4173
4174 // Return the destination basic blocks...
4176 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4177 }
4178 BasicBlock *getIndirectDest(unsigned i) const {
4179 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4180 }
4182 SmallVector<BasicBlock *, 16> IndirectDests;
4183 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4184 IndirectDests.push_back(getIndirectDest(i));
4185 return IndirectDests;
4186 }
4188 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4189 }
4190 void setIndirectDest(unsigned i, BasicBlock *B) {
4191 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4192 }
4193
4194 BasicBlock *getSuccessor(unsigned i) const {
4195 assert(i < getNumSuccessors() + 1 &&
4196 "Successor # out of range for callbr!");
4197 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4198 }
4199
4200 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4201 assert(i < getNumIndirectDests() + 1 &&
4202 "Successor # out of range for callbr!");
4203 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4204 }
4205
4206 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4207
4208 // Methods for support type inquiry through isa, cast, and dyn_cast:
4209 static bool classof(const Instruction *I) {
4210 return (I->getOpcode() == Instruction::CallBr);
4211 }
4212 static bool classof(const Value *V) {
4213 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4214 }
4215
4216private:
4217 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4218 // method so that subclasses cannot accidentally use it.
4219 template <typename Bitfield>
4220 void setSubclassData(typename Bitfield::Type Value) {
4221 Instruction::setSubclassData<Bitfield>(Value);
4222 }
4223};
4224
4225CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4226 ArrayRef<BasicBlock *> IndirectDests,
4227 ArrayRef<Value *> Args,
4228 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4229 const Twine &NameStr, Instruction *InsertBefore)
4230 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4231 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4232 InsertBefore) {
4233 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4234}
4235
4236CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4237 ArrayRef<BasicBlock *> IndirectDests,
4238 ArrayRef<Value *> Args,
4239 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4240 const Twine &NameStr, BasicBlock *InsertAtEnd)
4241 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4242 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4243 InsertAtEnd) {
4244 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4245}
4246
4247//===----------------------------------------------------------------------===//
4248// ResumeInst Class
4249//===----------------------------------------------------------------------===//
4250
4251//===---------------------------------------------------------------------------
4252/// Resume the propagation of an exception.
4253///
4254class ResumeInst : public Instruction {
4255 ResumeInst(const ResumeInst &RI);
4256
4257 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4258 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4259
4260protected:
4261 // Note: Instruction needs to be a friend here to call cloneImpl.
4262 friend class Instruction;
4263
4264 ResumeInst *cloneImpl() const;
4265
4266public:
4267 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4268 return new(1) ResumeInst(Exn, InsertBefore);
4269 }
4270
4271 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4272 return new(1) ResumeInst(Exn, InsertAtEnd);
4273 }
4274
4275 /// Provide fast operand accessors
4277
4278 /// Convenience accessor.
4279 Value *getValue() const { return Op<0>(); }
4280
4281 unsigned getNumSuccessors() const { return 0; }
4282
4283 // Methods for support type inquiry through isa, cast, and dyn_cast:
4284 static bool classof(const Instruction *I) {
4285 return I->getOpcode() == Instruction::Resume;
4286 }
4287 static bool classof(const Value *V) {
4288 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4289 }
4290
4291private:
4292 BasicBlock *getSuccessor(unsigned idx) const {
4293 llvm_unreachable("ResumeInst has no successors!");
4294 }
4295
4296 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4297 llvm_unreachable("ResumeInst has no successors!");
4298 }
4299};
4300
4301template <>
4303 public FixedNumOperandTraits<ResumeInst, 1> {
4304};
4305
4307
4308//===----------------------------------------------------------------------===//
4309// CatchSwitchInst Class
4310//===----------------------------------------------------------------------===//
4312 using UnwindDestField = BoolBitfieldElementT<0>;
4313
4314 /// The number of operands actually allocated. NumOperands is
4315 /// the number actually in use.
4316 unsigned ReservedSpace;
4317
4318 // Operand[0] = Outer scope
4319 // Operand[1] = Unwind block destination
4320 // Operand[n] = BasicBlock to go to on match
4321 CatchSwitchInst(const CatchSwitchInst &CSI);
4322
4323 /// Create a new switch instruction, specifying a
4324 /// default destination. The number of additional handlers can be specified
4325 /// here to make memory allocation more efficient.
4326 /// This constructor can also autoinsert before another instruction.
4327 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4328 unsigned NumHandlers, const Twine &NameStr,
4329 Instruction *InsertBefore);
4330
4331 /// Create a new switch instruction, specifying a
4332 /// default destination. The number of additional handlers can be specified
4333 /// here to make memory allocation more efficient.
4334 /// This constructor also autoinserts at the end of the specified BasicBlock.
4335 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4336 unsigned NumHandlers, const Twine &NameStr,
4337 BasicBlock *InsertAtEnd);
4338
4339 // allocate space for exactly zero operands
4340 void *operator new(size_t S) { return User::operator new(S); }
4341
4342 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4343 void growOperands(unsigned Size);
4344
4345protected:
4346 // Note: Instruction needs to be a friend here to call cloneImpl.
4347 friend class Instruction;
4348
4349 CatchSwitchInst *cloneImpl() const;
4350
4351public:
4352 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4353
4354 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4355 unsigned NumHandlers,
4356 const Twine &NameStr = "",
4357 Instruction *InsertBefore = nullptr) {
4358 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4359 InsertBefore);
4360 }
4361
4362 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4363 unsigned NumHandlers, const Twine &NameStr,
4364 BasicBlock *InsertAtEnd) {
4365 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4366 InsertAtEnd);
4367 }
4368
4369 /// Provide fast operand accessors
4371
4372 // Accessor Methods for CatchSwitch stmt
4373 Value *getParentPad() const { return getOperand(0); }
4374 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4375
4376 // Accessor Methods for CatchSwitch stmt
4377 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4378 bool unwindsToCaller() const { return !hasUnwindDest(); }
4380 if (hasUnwindDest())
4381 return cast<BasicBlock>(getOperand(1));
4382 return nullptr;
4383 }
4384 void setUnwindDest(BasicBlock *UnwindDest) {
4385 assert(UnwindDest);
4386 assert(hasUnwindDest());
4387 setOperand(1, UnwindDest);
4388 }
4389
4390 /// return the number of 'handlers' in this catchswitch
4391 /// instruction, except the default handler
4392 unsigned getNumHandlers() const {
4393 if (hasUnwindDest())
4394 return getNumOperands() - 2;
4395 return getNumOperands() - 1;
4396 }
4397
4398private:
4399 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4400 static const BasicBlock *handler_helper(const Value *V) {
4401 return cast<BasicBlock>(V);
4402 }
4403
4404public:
4405 using DerefFnTy = BasicBlock *(*)(Value *);
4408 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4412
4413 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4415 op_iterator It = op_begin() + 1;
4416 if (hasUnwindDest())
4417 ++It;
4418 return handler_iterator(It, DerefFnTy(handler_helper));
4419 }
4420
4421 /// Returns an iterator that points to the first handler in the
4422 /// CatchSwitchInst.
4424 const_op_iterator It = op_begin() + 1;
4425 if (hasUnwindDest())
4426 ++It;
4427 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4428 }
4429
4430 /// Returns a read-only iterator that points one past the last
4431 /// handler in the CatchSwitchInst.
4433 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4434 }
4435
4436 /// Returns an iterator that points one past the last handler in the
4437 /// CatchSwitchInst.
4439 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4440 }
4441
4442 /// iteration adapter for range-for loops.
4444 return make_range(handler_begin(), handler_end());
4445 }
4446
4447 /// iteration adapter for range-for loops.
4449 return make_range(handler_begin(), handler_end());
4450 }
4451
4452 /// Add an entry to the switch instruction...
4453 /// Note:
4454 /// This action invalidates handler_end(). Old handler_end() iterator will
4455 /// point to the added handler.
4456 void addHandler(BasicBlock *Dest);
4457
4458 void removeHandler(handler_iterator HI);
4459
4460 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4461 BasicBlock *getSuccessor(unsigned Idx) const {
4462 assert(Idx < getNumSuccessors() &&
4463 "Successor # out of range for catchswitch!");
4464 return cast<BasicBlock>(getOperand(Idx + 1));
4465 }
4466 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4467 assert(Idx < getNumSuccessors() &&
4468 "Successor # out of range for catchswitch!");
4469 setOperand(Idx + 1, NewSucc);
4470 }
4471
4472 // Methods for support type inquiry through isa, cast, and dyn_cast:
4473 static bool classof(const Instruction *I) {
4474 return I->getOpcode() == Instruction::CatchSwitch;
4475 }
4476 static bool