// LLVM 17.0.0git — llvm/IR/Instructions.h
// (Header recovered from the doxygen "documentation of this file" page;
//  the original page-navigation text has been converted to this comment.)
1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52
53//===----------------------------------------------------------------------===//
54// AllocaInst Class
55//===----------------------------------------------------------------------===//
56
57/// an instruction to allocate memory on the stack
59 Type *AllocatedType;
60
61 using AlignmentField = AlignmentBitfieldElementT<0>;
62 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65 SwiftErrorField>(),
66 "Bitfields must be contiguous");
67
68protected:
69 // Note: Instruction needs to be a friend here to call cloneImpl.
70 friend class Instruction;
71
72 AllocaInst *cloneImpl() const;
73
74public:
75 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76 const Twine &Name, Instruction *InsertBefore);
77 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78 const Twine &Name, BasicBlock *InsertAtEnd);
79
80 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81 Instruction *InsertBefore);
82 AllocaInst(Type *Ty, unsigned AddrSpace,
83 const Twine &Name, BasicBlock *InsertAtEnd);
84
85 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86 const Twine &Name = "", Instruction *InsertBefore = nullptr);
87 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88 const Twine &Name, BasicBlock *InsertAtEnd);
89
90 /// Return true if there is an allocation size parameter to the allocation
91 /// instruction that is not 1.
92 bool isArrayAllocation() const;
93
94 /// Get the number of elements allocated. For a simple allocation of a single
95 /// element, this will return a constant 1 value.
96 const Value *getArraySize() const { return getOperand(0); }
97 Value *getArraySize() { return getOperand(0); }
98
99 /// Overload to return most specific pointer type.
101 return cast<PointerType>(Instruction::getType());
102 }
103
104 /// Return the address space for the allocation.
105 unsigned getAddressSpace() const {
106 return getType()->getAddressSpace();
107 }
108
109 /// Get allocation size in bytes. Returns std::nullopt if size can't be
110 /// determined, e.g. in case of a VLA.
111 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
112
113 /// Get allocation size in bits. Returns std::nullopt if size can't be
114 /// determined, e.g. in case of a VLA.
115 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116
117 /// Return the type that is being allocated by the instruction.
118 Type *getAllocatedType() const { return AllocatedType; }
119 /// for use only in special circumstances that need to generically
120 /// transform a whole instruction (eg: IR linking and vectorization).
121 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122
123 /// Return the alignment of the memory that is being allocated by the
124 /// instruction.
125 Align getAlign() const {
126 return Align(1ULL << getSubclassData<AlignmentField>());
127 }
128
130 setSubclassData<AlignmentField>(Log2(Align));
131 }
132
133 /// Return true if this alloca is in the entry block of the function and is a
134 /// constant size. If so, the code generator will fold it into the
135 /// prolog/epilog code, so it is basically free.
136 bool isStaticAlloca() const;
137
138 /// Return true if this alloca is used as an inalloca argument to a call. Such
139 /// allocas are never considered static even if they are in the entry block.
140 bool isUsedWithInAlloca() const {
141 return getSubclassData<UsedWithInAllocaField>();
142 }
143
144 /// Specify whether this alloca is used to represent the arguments to a call.
145 void setUsedWithInAlloca(bool V) {
146 setSubclassData<UsedWithInAllocaField>(V);
147 }
148
149 /// Return true if this alloca is used as a swifterror argument to a call.
150 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
151 /// Specify whether this alloca is used to represent a swifterror.
152 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
153
154 // Methods for support type inquiry through isa, cast, and dyn_cast:
155 static bool classof(const Instruction *I) {
156 return (I->getOpcode() == Instruction::Alloca);
157 }
158 static bool classof(const Value *V) {
159 return isa<Instruction>(V) && classof(cast<Instruction>(V));
160 }
161
162private:
163 // Shadow Instruction::setInstructionSubclassData with a private forwarding
164 // method so that subclasses cannot accidentally use it.
165 template <typename Bitfield>
166 void setSubclassData(typename Bitfield::Type Value) {
167 Instruction::setSubclassData<Bitfield>(Value);
168 }
169};
170
171//===----------------------------------------------------------------------===//
172// LoadInst Class
173//===----------------------------------------------------------------------===//
174
175/// An instruction for reading from memory. This uses the SubclassData field in
176/// Value to store whether or not the load is volatile.
178 using VolatileField = BoolBitfieldElementT<0>;
181 static_assert(
182 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
183 "Bitfields must be contiguous");
184
185 void AssertOK();
186
187protected:
188 // Note: Instruction needs to be a friend here to call cloneImpl.
189 friend class Instruction;
190
191 LoadInst *cloneImpl() const;
192
193public:
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 Instruction *InsertBefore);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 BasicBlock *InsertAtEnd);
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, Instruction *InsertBefore = nullptr);
203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204 Align Align, BasicBlock *InsertAtEnd);
205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208 Instruction *InsertBefore = nullptr);
209 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
211 BasicBlock *InsertAtEnd);
212
213 /// Return true if this is a load from a volatile memory location.
214 bool isVolatile() const { return getSubclassData<VolatileField>(); }
215
216 /// Specify whether this is a volatile load or not.
217 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
218
219 /// Return the alignment of the access that is being performed.
220 Align getAlign() const {
221 return Align(1ULL << (getSubclassData<AlignmentField>()));
222 }
223
225 setSubclassData<AlignmentField>(Log2(Align));
226 }
227
228 /// Returns the ordering constraint of this load instruction.
230 return getSubclassData<OrderingField>();
231 }
232 /// Sets the ordering constraint of this load instruction. May not be Release
233 /// or AcquireRelease.
235 setSubclassData<OrderingField>(Ordering);
236 }
237
238 /// Returns the synchronization scope ID of this load instruction.
240 return SSID;
241 }
242
243 /// Sets the synchronization scope ID of this load instruction.
245 this->SSID = SSID;
246 }
247
248 /// Sets the ordering constraint and the synchronization scope ID of this load
249 /// instruction.
252 setOrdering(Ordering);
253 setSyncScopeID(SSID);
254 }
255
256 bool isSimple() const { return !isAtomic() && !isVolatile(); }
257
258 bool isUnordered() const {
261 !isVolatile();
262 }
263
265 const Value *getPointerOperand() const { return getOperand(0); }
266 static unsigned getPointerOperandIndex() { return 0U; }
268
269 /// Returns the address space of the pointer operand.
270 unsigned getPointerAddressSpace() const {
272 }
273
274 // Methods for support type inquiry through isa, cast, and dyn_cast:
275 static bool classof(const Instruction *I) {
276 return I->getOpcode() == Instruction::Load;
277 }
278 static bool classof(const Value *V) {
279 return isa<Instruction>(V) && classof(cast<Instruction>(V));
280 }
281
282private:
283 // Shadow Instruction::setInstructionSubclassData with a private forwarding
284 // method so that subclasses cannot accidentally use it.
285 template <typename Bitfield>
286 void setSubclassData(typename Bitfield::Type Value) {
287 Instruction::setSubclassData<Bitfield>(Value);
288 }
289
290 /// The synchronization scope ID of this load instruction. Not quite enough
291 /// room in SubClassData for everything, so synchronization scope ID gets its
292 /// own field.
293 SyncScope::ID SSID;
294};
295
296//===----------------------------------------------------------------------===//
297// StoreInst Class
298//===----------------------------------------------------------------------===//
299
300/// An instruction for storing to memory.
301class StoreInst : public Instruction {
302 using VolatileField = BoolBitfieldElementT<0>;
305 static_assert(
306 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
307 "Bitfields must be contiguous");
308
309 void AssertOK();
310
311protected:
312 // Note: Instruction needs to be a friend here to call cloneImpl.
313 friend class Instruction;
314
315 StoreInst *cloneImpl() const;
316
317public:
318 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
319 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
320 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
323 Instruction *InsertBefore = nullptr);
325 BasicBlock *InsertAtEnd);
328 Instruction *InsertBefore = nullptr);
330 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
331
332 // allocate space for exactly two operands
333 void *operator new(size_t S) { return User::operator new(S, 2); }
334 void operator delete(void *Ptr) { User::operator delete(Ptr); }
335
336 /// Return true if this is a store to a volatile memory location.
337 bool isVolatile() const { return getSubclassData<VolatileField>(); }
338
339 /// Specify whether this is a volatile store or not.
340 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
341
342 /// Transparently provide more efficient getOperand methods.
344
345 Align getAlign() const {
346 return Align(1ULL << (getSubclassData<AlignmentField>()));
347 }
348
350 setSubclassData<AlignmentField>(Log2(Align));
351 }
352
353 /// Returns the ordering constraint of this store instruction.
355 return getSubclassData<OrderingField>();
356 }
357
358 /// Sets the ordering constraint of this store instruction. May not be
359 /// Acquire or AcquireRelease.
361 setSubclassData<OrderingField>(Ordering);
362 }
363
364 /// Returns the synchronization scope ID of this store instruction.
366 return SSID;
367 }
368
369 /// Sets the synchronization scope ID of this store instruction.
371 this->SSID = SSID;
372 }
373
374 /// Sets the ordering constraint and the synchronization scope ID of this
375 /// store instruction.
378 setOrdering(Ordering);
379 setSyncScopeID(SSID);
380 }
381
382 bool isSimple() const { return !isAtomic() && !isVolatile(); }
383
384 bool isUnordered() const {
387 !isVolatile();
388 }
389
391 const Value *getValueOperand() const { return getOperand(0); }
392
394 const Value *getPointerOperand() const { return getOperand(1); }
395 static unsigned getPointerOperandIndex() { return 1U; }
397
398 /// Returns the address space of the pointer operand.
399 unsigned getPointerAddressSpace() const {
401 }
402
403 // Methods for support type inquiry through isa, cast, and dyn_cast:
404 static bool classof(const Instruction *I) {
405 return I->getOpcode() == Instruction::Store;
406 }
407 static bool classof(const Value *V) {
408 return isa<Instruction>(V) && classof(cast<Instruction>(V));
409 }
410
411private:
412 // Shadow Instruction::setInstructionSubclassData with a private forwarding
413 // method so that subclasses cannot accidentally use it.
414 template <typename Bitfield>
415 void setSubclassData(typename Bitfield::Type Value) {
416 Instruction::setSubclassData<Bitfield>(Value);
417 }
418
419 /// The synchronization scope ID of this store instruction. Not quite enough
420 /// room in SubClassData for everything, so synchronization scope ID gets its
421 /// own field.
422 SyncScope::ID SSID;
423};
424
425template <>
426struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
427};
428
430
431//===----------------------------------------------------------------------===//
432// FenceInst Class
433//===----------------------------------------------------------------------===//
434
435/// An instruction for ordering other memory operations.
436class FenceInst : public Instruction {
437 using OrderingField = AtomicOrderingBitfieldElementT<0>;
438
439 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
440
441protected:
442 // Note: Instruction needs to be a friend here to call cloneImpl.
443 friend class Instruction;
444
445 FenceInst *cloneImpl() const;
446
447public:
448 // Ordering may only be Acquire, Release, AcquireRelease, or
449 // SequentiallyConsistent.
452 Instruction *InsertBefore = nullptr);
454 BasicBlock *InsertAtEnd);
455
456 // allocate space for exactly zero operands
457 void *operator new(size_t S) { return User::operator new(S, 0); }
458 void operator delete(void *Ptr) { User::operator delete(Ptr); }
459
460 /// Returns the ordering constraint of this fence instruction.
462 return getSubclassData<OrderingField>();
463 }
464
465 /// Sets the ordering constraint of this fence instruction. May only be
466 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
468 setSubclassData<OrderingField>(Ordering);
469 }
470
471 /// Returns the synchronization scope ID of this fence instruction.
473 return SSID;
474 }
475
476 /// Sets the synchronization scope ID of this fence instruction.
478 this->SSID = SSID;
479 }
480
481 // Methods for support type inquiry through isa, cast, and dyn_cast:
482 static bool classof(const Instruction *I) {
483 return I->getOpcode() == Instruction::Fence;
484 }
485 static bool classof(const Value *V) {
486 return isa<Instruction>(V) && classof(cast<Instruction>(V));
487 }
488
489private:
490 // Shadow Instruction::setInstructionSubclassData with a private forwarding
491 // method so that subclasses cannot accidentally use it.
492 template <typename Bitfield>
493 void setSubclassData(typename Bitfield::Type Value) {
494 Instruction::setSubclassData<Bitfield>(Value);
495 }
496
497 /// The synchronization scope ID of this fence instruction. Not quite enough
498 /// room in SubClassData for everything, so synchronization scope ID gets its
499 /// own field.
500 SyncScope::ID SSID;
501};
502
503//===----------------------------------------------------------------------===//
504// AtomicCmpXchgInst Class
505//===----------------------------------------------------------------------===//
506
507/// An instruction that atomically checks whether a
508/// specified value is in a memory location, and, if it is, stores a new value
509/// there. The value returned by this instruction is a pair containing the
510/// original value as first element, and an i1 indicating success (true) or
511/// failure (false) as second element.
512///
514 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
515 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
516 SyncScope::ID SSID);
517
518 template <unsigned Offset>
519 using AtomicOrderingBitfieldElement =
522
523protected:
524 // Note: Instruction needs to be a friend here to call cloneImpl.
525 friend class Instruction;
526
528
529public:
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
533 Instruction *InsertBefore = nullptr);
534 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
535 AtomicOrdering SuccessOrdering,
536 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
537 BasicBlock *InsertAtEnd);
538
539 // allocate space for exactly three operands
540 void *operator new(size_t S) { return User::operator new(S, 3); }
541 void operator delete(void *Ptr) { User::operator delete(Ptr); }
542
551 static_assert(
554 "Bitfields must be contiguous");
555
556 /// Return the alignment of the memory that is being allocated by the
557 /// instruction.
558 Align getAlign() const {
559 return Align(1ULL << getSubclassData<AlignmentField>());
560 }
561
563 setSubclassData<AlignmentField>(Log2(Align));
564 }
565
566 /// Return true if this is a cmpxchg from a volatile memory
567 /// location.
568 ///
569 bool isVolatile() const { return getSubclassData<VolatileField>(); }
570
571 /// Specify whether this is a volatile cmpxchg.
572 ///
573 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
574
575 /// Return true if this cmpxchg may spuriously fail.
576 bool isWeak() const { return getSubclassData<WeakField>(); }
577
578 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
579
580 /// Transparently provide more efficient getOperand methods.
582
584 return Ordering != AtomicOrdering::NotAtomic &&
585 Ordering != AtomicOrdering::Unordered;
586 }
587
589 return Ordering != AtomicOrdering::NotAtomic &&
590 Ordering != AtomicOrdering::Unordered &&
591 Ordering != AtomicOrdering::AcquireRelease &&
592 Ordering != AtomicOrdering::Release;
593 }
594
595 /// Returns the success ordering constraint of this cmpxchg instruction.
597 return getSubclassData<SuccessOrderingField>();
598 }
599
600 /// Sets the success ordering constraint of this cmpxchg instruction.
602 assert(isValidSuccessOrdering(Ordering) &&
603 "invalid CmpXchg success ordering");
604 setSubclassData<SuccessOrderingField>(Ordering);
605 }
606
607 /// Returns the failure ordering constraint of this cmpxchg instruction.
609 return getSubclassData<FailureOrderingField>();
610 }
611
612 /// Sets the failure ordering constraint of this cmpxchg instruction.
614 assert(isValidFailureOrdering(Ordering) &&
615 "invalid CmpXchg failure ordering");
616 setSubclassData<FailureOrderingField>(Ordering);
617 }
618
619 /// Returns a single ordering which is at least as strong as both the
620 /// success and failure orderings for this cmpxchg.
629 }
630 return getSuccessOrdering();
631 }
632
633 /// Returns the synchronization scope ID of this cmpxchg instruction.
635 return SSID;
636 }
637
638 /// Sets the synchronization scope ID of this cmpxchg instruction.
640 this->SSID = SSID;
641 }
642
644 const Value *getPointerOperand() const { return getOperand(0); }
645 static unsigned getPointerOperandIndex() { return 0U; }
646
648 const Value *getCompareOperand() const { return getOperand(1); }
649
651 const Value *getNewValOperand() const { return getOperand(2); }
652
653 /// Returns the address space of the pointer operand.
654 unsigned getPointerAddressSpace() const {
656 }
657
658 /// Returns the strongest permitted ordering on failure, given the
659 /// desired ordering on success.
660 ///
661 /// If the comparison in a cmpxchg operation fails, there is no atomic store
662 /// so release semantics cannot be provided. So this function drops explicit
663 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
664 /// operation would remain SequentiallyConsistent.
665 static AtomicOrdering
667 switch (SuccessOrdering) {
668 default:
669 llvm_unreachable("invalid cmpxchg success ordering");
678 }
679 }
680
681 // Methods for support type inquiry through isa, cast, and dyn_cast:
682 static bool classof(const Instruction *I) {
683 return I->getOpcode() == Instruction::AtomicCmpXchg;
684 }
685 static bool classof(const Value *V) {
686 return isa<Instruction>(V) && classof(cast<Instruction>(V));
687 }
688
689private:
690 // Shadow Instruction::setInstructionSubclassData with a private forwarding
691 // method so that subclasses cannot accidentally use it.
692 template <typename Bitfield>
693 void setSubclassData(typename Bitfield::Type Value) {
694 Instruction::setSubclassData<Bitfield>(Value);
695 }
696
697 /// The synchronization scope ID of this cmpxchg instruction. Not quite
698 /// enough room in SubClassData for everything, so synchronization scope ID
699 /// gets its own field.
700 SyncScope::ID SSID;
701};
702
703template <>
705 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
706};
707
709
710//===----------------------------------------------------------------------===//
711// AtomicRMWInst Class
712//===----------------------------------------------------------------------===//
713
714/// an instruction that atomically reads a memory location,
715/// combines it with another value, and then stores the result back. Returns
716/// the old value.
717///
719protected:
720 // Note: Instruction needs to be a friend here to call cloneImpl.
721 friend class Instruction;
722
723 AtomicRMWInst *cloneImpl() const;
724
725public:
726 /// This enumeration lists the possible modifications atomicrmw can make. In
727 /// the descriptions, 'p' is the pointer to the instruction's memory location,
728 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
729 /// instruction. These instructions always return 'old'.
730 enum BinOp : unsigned {
731 /// *p = v
733 /// *p = old + v
735 /// *p = old - v
737 /// *p = old & v
739 /// *p = ~(old & v)
741 /// *p = old | v
743 /// *p = old ^ v
745 /// *p = old >signed v ? old : v
747 /// *p = old <signed v ? old : v
749 /// *p = old >unsigned v ? old : v
751 /// *p = old <unsigned v ? old : v
753
754 /// *p = old + v
756
757 /// *p = old - v
759
760 /// *p = maxnum(old, v)
761 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
763
764 /// *p = minnum(old, v)
765 /// \p minnum matches the behavior of \p llvm.minnum.*.
767
768 /// Increment one up to a maximum value.
769 /// *p = (old u>= v) ? 0 : (old + 1)
771
772 /// Decrement one until a minimum value or zero.
773 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
775
776 FIRST_BINOP = Xchg,
777 LAST_BINOP = UDecWrap,
778 BAD_BINOP
779 };
780
781private:
782 template <unsigned Offset>
783 using AtomicOrderingBitfieldElement =
786
787 template <unsigned Offset>
788 using BinOpBitfieldElement =
790
791public:
792 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793 AtomicOrdering Ordering, SyncScope::ID SSID,
794 Instruction *InsertBefore = nullptr);
795 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
796 AtomicOrdering Ordering, SyncScope::ID SSID,
797 BasicBlock *InsertAtEnd);
798
799 // allocate space for exactly two operands
800 void *operator new(size_t S) { return User::operator new(S, 2); }
801 void operator delete(void *Ptr) { User::operator delete(Ptr); }
802
806 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
810 "Bitfields must be contiguous");
811
812 BinOp getOperation() const { return getSubclassData<OperationField>(); }
813
814 static StringRef getOperationName(BinOp Op);
815
816 static bool isFPOperation(BinOp Op) {
817 switch (Op) {
822 return true;
823 default:
824 return false;
825 }
826 }
827
829 setSubclassData<OperationField>(Operation);
830 }
831
832 /// Return the alignment of the memory that is being allocated by the
833 /// instruction.
834 Align getAlign() const {
835 return Align(1ULL << getSubclassData<AlignmentField>());
836 }
837
839 setSubclassData<AlignmentField>(Log2(Align));
840 }
841
842 /// Return true if this is a RMW on a volatile memory location.
843 ///
844 bool isVolatile() const { return getSubclassData<VolatileField>(); }
845
846 /// Specify whether this is a volatile RMW or not.
847 ///
848 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
849
850 /// Transparently provide more efficient getOperand methods.
852
853 /// Returns the ordering constraint of this rmw instruction.
855 return getSubclassData<AtomicOrderingField>();
856 }
857
858 /// Sets the ordering constraint of this rmw instruction.
860 assert(Ordering != AtomicOrdering::NotAtomic &&
861 "atomicrmw instructions can only be atomic.");
862 assert(Ordering != AtomicOrdering::Unordered &&
863 "atomicrmw instructions cannot be unordered.");
864 setSubclassData<AtomicOrderingField>(Ordering);
865 }
866
867 /// Returns the synchronization scope ID of this rmw instruction.
869 return SSID;
870 }
871
872 /// Sets the synchronization scope ID of this rmw instruction.
874 this->SSID = SSID;
875 }
876
877 Value *getPointerOperand() { return getOperand(0); }
878 const Value *getPointerOperand() const { return getOperand(0); }
879 static unsigned getPointerOperandIndex() { return 0U; }
880
881 Value *getValOperand() { return getOperand(1); }
882 const Value *getValOperand() const { return getOperand(1); }
883
884 /// Returns the address space of the pointer operand.
885 unsigned getPointerAddressSpace() const {
887 }
888
890 return isFPOperation(getOperation());
891 }
892
893 // Methods for support type inquiry through isa, cast, and dyn_cast:
894 static bool classof(const Instruction *I) {
895 return I->getOpcode() == Instruction::AtomicRMW;
896 }
897 static bool classof(const Value *V) {
898 return isa<Instruction>(V) && classof(cast<Instruction>(V));
899 }
900
901private:
902 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
903 AtomicOrdering Ordering, SyncScope::ID SSID);
904
905 // Shadow Instruction::setInstructionSubclassData with a private forwarding
906 // method so that subclasses cannot accidentally use it.
907 template <typename Bitfield>
908 void setSubclassData(typename Bitfield::Type Value) {
909 Instruction::setSubclassData<Bitfield>(Value);
910 }
911
912 /// The synchronization scope ID of this rmw instruction. Not quite enough
913 /// room in SubClassData for everything, so synchronization scope ID gets its
914 /// own field.
915 SyncScope::ID SSID;
916};
917
918template <>
920 : public FixedNumOperandTraits<AtomicRMWInst,2> {
921};
922
924
925//===----------------------------------------------------------------------===//
926// GetElementPtrInst Class
927//===----------------------------------------------------------------------===//
928
929// checkGEPType - Simple wrapper function to give a better assertion failure
930// message on bad indexes for a gep instruction.
931//
933 assert(Ty && "Invalid GetElementPtrInst indices for type!");
934 return Ty;
935}
936
937/// an instruction for type-safe pointer arithmetic to
938/// access elements of arrays and structs
939///
941 Type *SourceElementType;
942 Type *ResultElementType;
943
945
946 /// Constructors - Create a getelementptr instruction with a base pointer an
947 /// list of indices. The first ctor can optionally insert before an existing
948 /// instruction, the second appends the new instruction to the specified
949 /// BasicBlock.
950 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
951 ArrayRef<Value *> IdxList, unsigned Values,
952 const Twine &NameStr, Instruction *InsertBefore);
953 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
954 ArrayRef<Value *> IdxList, unsigned Values,
955 const Twine &NameStr, BasicBlock *InsertAtEnd);
956
957 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
958
959protected:
960 // Note: Instruction needs to be a friend here to call cloneImpl.
961 friend class Instruction;
962
964
965public:
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr = "",
969 Instruction *InsertBefore = nullptr) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type");
972 assert(cast<PointerType>(Ptr->getType()->getScalarType())
973 ->isOpaqueOrPointeeTypeMatches(PointeeType));
974 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975 NameStr, InsertBefore);
976 }
977
978 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
979 ArrayRef<Value *> IdxList,
980 const Twine &NameStr,
981 BasicBlock *InsertAtEnd) {
982 unsigned Values = 1 + unsigned(IdxList.size());
983 assert(PointeeType && "Must specify element type");
984 assert(cast<PointerType>(Ptr->getType()->getScalarType())
985 ->isOpaqueOrPointeeTypeMatches(PointeeType));
986 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
987 NameStr, InsertAtEnd);
988 }
989
990 /// Create an "inbounds" getelementptr. See the documentation for the
991 /// "inbounds" flag in LangRef.html for details.
992 static GetElementPtrInst *
994 const Twine &NameStr = "",
995 Instruction *InsertBefore = nullptr) {
997 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
998 GEP->setIsInBounds(true);
999 return GEP;
1000 }
1001
1003 ArrayRef<Value *> IdxList,
1004 const Twine &NameStr,
1005 BasicBlock *InsertAtEnd) {
1007 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1008 GEP->setIsInBounds(true);
1009 return GEP;
1010 }
1011
1012 /// Transparently provide more efficient getOperand methods.
1014
1015 Type *getSourceElementType() const { return SourceElementType; }
1016
1017 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1018 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1019
1021 assert(cast<PointerType>(getType()->getScalarType())
1022 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1023 return ResultElementType;
1024 }
1025
1026 /// Returns the address space of this instruction's pointer type.
1027 unsigned getAddressSpace() const {
1028 // Note that this is always the same as the pointer operand's address space
1029 // and that is cheaper to compute, so cheat here.
1030 return getPointerAddressSpace();
1031 }
1032
1033 /// Returns the result type of a getelementptr with the given source
1034 /// element type and indexes.
1035 ///
1036 /// Null is returned if the indices are invalid for the specified
1037 /// source element type.
1038 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1039 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1040 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1041
1042 /// Return the type of the element at the given index of an indexable
1043 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1044 ///
1045 /// Returns null if the type can't be indexed, or the given index is not
1046 /// legal for the given type.
1047 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1048 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1049
1050 inline op_iterator idx_begin() { return op_begin()+1; }
1051 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1052 inline op_iterator idx_end() { return op_end(); }
1053 inline const_op_iterator idx_end() const { return op_end(); }
1054
1056 return make_range(idx_begin(), idx_end());
1057 }
1058
1060 return make_range(idx_begin(), idx_end());
1061 }
1062
1064 return getOperand(0);
1065 }
1066 const Value *getPointerOperand() const {
1067 return getOperand(0);
1068 }
1069 static unsigned getPointerOperandIndex() {
1070 return 0U; // get index for modifying correct operand.
1071 }
1072
1073 /// Method to return the pointer operand as a
1074 /// PointerType.
1076 return getPointerOperand()->getType();
1077 }
1078
1079 /// Returns the address space of the pointer operand.
1080 unsigned getPointerAddressSpace() const {
1082 }
1083
1084 /// Returns the pointer type returned by the GEP
1085 /// instruction, which may be a vector of pointers.
1087 ArrayRef<Value *> IdxList) {
1088 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1089 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1090 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1091 Type *PtrTy = OrigPtrTy->isOpaque()
1092 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1093 : PointerType::get(ResultElemTy, AddrSpace);
1094 // Vector GEP
1095 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1096 ElementCount EltCount = PtrVTy->getElementCount();
1097 return VectorType::get(PtrTy, EltCount);
1098 }
1099 for (Value *Index : IdxList)
1100 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1101 ElementCount EltCount = IndexVTy->getElementCount();
1102 return VectorType::get(PtrTy, EltCount);
1103 }
1104 // Scalar GEP
1105 return PtrTy;
1106 }
1107
1108 unsigned getNumIndices() const { // Note: always non-negative
1109 return getNumOperands() - 1;
1110 }
1111
1112 bool hasIndices() const {
1113 return getNumOperands() > 1;
1114 }
1115
1116 /// Return true if all of the indices of this GEP are
1117 /// zeros. If so, the result pointer and the first operand have the same
1118 /// value, just potentially different types.
1119 bool hasAllZeroIndices() const;
1120
1121 /// Return true if all of the indices of this GEP are
1122 /// constant integers. If so, the result pointer and the first operand have
1123 /// a constant offset between them.
1124 bool hasAllConstantIndices() const;
1125
1126 /// Set or clear the inbounds flag on this GEP instruction.
1127 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1128 void setIsInBounds(bool b = true);
1129
1130 /// Determine whether the GEP has the inbounds flag.
1131 bool isInBounds() const;
1132
1133 /// Accumulate the constant address offset of this GEP if possible.
1134 ///
1135 /// This routine accepts an APInt into which it will accumulate the constant
1136 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1137 /// all-constant, it returns false and the value of the offset APInt is
1138 /// undefined (it is *not* preserved!). The APInt passed into this routine
1139 /// must be at least as wide as the IntPtr type for the address space of
1140 /// the base GEP pointer.
1141 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1142 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1143 MapVector<Value *, APInt> &VariableOffsets,
1144 APInt &ConstantOffset) const;
1145 // Methods for support type inquiry through isa, cast, and dyn_cast:
1146 static bool classof(const Instruction *I) {
1147 return (I->getOpcode() == Instruction::GetElementPtr);
1148 }
1149 static bool classof(const Value *V) {
1150 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1151 }
1152};
1153
1154template <>
1156 public VariadicOperandTraits<GetElementPtrInst, 1> {
1157};
1158
1159GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1160 ArrayRef<Value *> IdxList, unsigned Values,
1161 const Twine &NameStr,
1162 Instruction *InsertBefore)
1163 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1164 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1165 Values, InsertBefore),
1166 SourceElementType(PointeeType),
1167 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1168 assert(cast<PointerType>(getType()->getScalarType())
1169 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1170 init(Ptr, IdxList, NameStr);
1171}
1172
1173GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1174 ArrayRef<Value *> IdxList, unsigned Values,
1175 const Twine &NameStr,
1176 BasicBlock *InsertAtEnd)
1177 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1178 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1179 Values, InsertAtEnd),
1180 SourceElementType(PointeeType),
1181 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1182 assert(cast<PointerType>(getType()->getScalarType())
1183 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1184 init(Ptr, IdxList, NameStr);
1185}
1186
1187DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1188
1189//===----------------------------------------------------------------------===//
1190// ICmpInst Class
1191//===----------------------------------------------------------------------===//
1192
1193/// This instruction compares its operands according to the predicate given
1194/// to the constructor. It only operates on integers or pointers. The operands
1195/// must be identical types.
1196/// Represent an integer comparison operator.
1197class ICmpInst: public CmpInst {
1198 void AssertOK() {
1199 assert(isIntPredicate() &&
1200 "Invalid ICmp predicate value");
1201 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1202 "Both operands to ICmp instruction are not of the same type!");
1203 // Check that the operands are the right type
1204 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1205 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1206 "Invalid operand types for ICmp instruction");
1207 }
1208
1209protected:
1210 // Note: Instruction needs to be a friend here to call cloneImpl.
1211 friend class Instruction;
1212
1213 /// Clone an identical ICmpInst
1214 ICmpInst *cloneImpl() const;
1215
1216public:
1217 /// Constructor with insert-before-instruction semantics.
1219 Instruction *InsertBefore, ///< Where to insert
1220 Predicate pred, ///< The predicate to use for the comparison
1221 Value *LHS, ///< The left-hand-side of the expression
1222 Value *RHS, ///< The right-hand-side of the expression
1223 const Twine &NameStr = "" ///< Name of the instruction
1224 ) : CmpInst(makeCmpResultType(LHS->getType()),
1225 Instruction::ICmp, pred, LHS, RHS, NameStr,
1226 InsertBefore) {
1227#ifndef NDEBUG
1228 AssertOK();
1229#endif
1230 }
1231
1232 /// Constructor with insert-at-end semantics.
1234 BasicBlock &InsertAtEnd, ///< Block to insert into.
1235 Predicate pred, ///< The predicate to use for the comparison
1236 Value *LHS, ///< The left-hand-side of the expression
1237 Value *RHS, ///< The right-hand-side of the expression
1238 const Twine &NameStr = "" ///< Name of the instruction
1239 ) : CmpInst(makeCmpResultType(LHS->getType()),
1240 Instruction::ICmp, pred, LHS, RHS, NameStr,
1241 &InsertAtEnd) {
1242#ifndef NDEBUG
1243 AssertOK();
1244#endif
1245 }
1246
1247 /// Constructor with no-insertion semantics
1249 Predicate pred, ///< The predicate to use for the comparison
1250 Value *LHS, ///< The left-hand-side of the expression
1251 Value *RHS, ///< The right-hand-side of the expression
1252 const Twine &NameStr = "" ///< Name of the instruction
1253 ) : CmpInst(makeCmpResultType(LHS->getType()),
1254 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1255#ifndef NDEBUG
1256 AssertOK();
1257#endif
1258 }
1259
1260 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1261 /// @returns the predicate that would be the result if the operand were
1262 /// regarded as signed.
1263 /// Return the signed version of the predicate
1265 return getSignedPredicate(getPredicate());
1266 }
1267
1268 /// This is a static version that you can use without an instruction.
1269 /// Return the signed version of the predicate.
1270 static Predicate getSignedPredicate(Predicate pred);
1271
1272 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1273 /// @returns the predicate that would be the result if the operand were
1274 /// regarded as unsigned.
1275 /// Return the unsigned version of the predicate
1277 return getUnsignedPredicate(getPredicate());
1278 }
1279
1280 /// This is a static version that you can use without an instruction.
1281 /// Return the unsigned version of the predicate.
1282 static Predicate getUnsignedPredicate(Predicate pred);
1283
1284 /// Return true if this predicate is either EQ or NE. This also
1285 /// tests for commutativity.
1286 static bool isEquality(Predicate P) {
1287 return P == ICMP_EQ || P == ICMP_NE;
1288 }
1289
1290 /// Return true if this predicate is either EQ or NE. This also
1291 /// tests for commutativity.
1292 bool isEquality() const {
1293 return isEquality(getPredicate());
1294 }
1295
1296 /// @returns true if the predicate of this ICmpInst is commutative
1297 /// Determine if this relation is commutative.
1298 bool isCommutative() const { return isEquality(); }
1299
1300 /// Return true if the predicate is relational (not EQ or NE).
1301 ///
1302 bool isRelational() const {
1303 return !isEquality();
1304 }
1305
1306 /// Return true if the predicate is relational (not EQ or NE).
1307 ///
1308 static bool isRelational(Predicate P) {
1309 return !isEquality(P);
1310 }
1311
1312 /// Return true if the predicate is SGT or UGT.
1313 ///
1314 static bool isGT(Predicate P) {
1315 return P == ICMP_SGT || P == ICMP_UGT;
1316 }
1317
1318 /// Return true if the predicate is SLT or ULT.
1319 ///
1320 static bool isLT(Predicate P) {
1321 return P == ICMP_SLT || P == ICMP_ULT;
1322 }
1323
1324 /// Return true if the predicate is SGE or UGE.
1325 ///
1326 static bool isGE(Predicate P) {
1327 return P == ICMP_SGE || P == ICMP_UGE;
1328 }
1329
1330 /// Return true if the predicate is SLE or ULE.
1331 ///
1332 static bool isLE(Predicate P) {
1333 return P == ICMP_SLE || P == ICMP_ULE;
1334 }
1335
1336 /// Returns the sequence of all ICmp predicates.
1337 ///
1338 static auto predicates() { return ICmpPredicates(); }
1339
1340 /// Exchange the two operands to this instruction in such a way that it does
1341 /// not modify the semantics of the instruction. The predicate value may be
1342 /// changed to retain the same result if the predicate is order dependent
1343 /// (e.g. ult).
1344 /// Swap operands and adjust predicate.
1346 setPredicate(getSwappedPredicate());
1347 Op<0>().swap(Op<1>());
1348 }
1349
1350 /// Return result of `LHS Pred RHS` comparison.
1351 static bool compare(const APInt &LHS, const APInt &RHS,
1352 ICmpInst::Predicate Pred);
1353
1354 // Methods for support type inquiry through isa, cast, and dyn_cast:
1355 static bool classof(const Instruction *I) {
1356 return I->getOpcode() == Instruction::ICmp;
1357 }
1358 static bool classof(const Value *V) {
1359 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1360 }
1361};
1362
1363//===----------------------------------------------------------------------===//
1364// FCmpInst Class
1365//===----------------------------------------------------------------------===//
1366
1367/// This instruction compares its operands according to the predicate given
1368/// to the constructor. It only operates on floating point values or packed
1369/// vectors of floating point values. The operands must be identical types.
1370/// Represents a floating point comparison operator.
1371class FCmpInst: public CmpInst {
1372 void AssertOK() {
1373 assert(isFPPredicate() && "Invalid FCmp predicate value");
1374 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1375 "Both operands to FCmp instruction are not of the same type!");
1376 // Check that the operands are the right type
1377 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1378 "Invalid operand types for FCmp instruction");
1379 }
1380
1381protected:
1382 // Note: Instruction needs to be a friend here to call cloneImpl.
1383 friend class Instruction;
1384
1385 /// Clone an identical FCmpInst
1386 FCmpInst *cloneImpl() const;
1387
1388public:
1389 /// Constructor with insert-before-instruction semantics.
1391 Instruction *InsertBefore, ///< Where to insert
1392 Predicate pred, ///< The predicate to use for the comparison
1393 Value *LHS, ///< The left-hand-side of the expression
1394 Value *RHS, ///< The right-hand-side of the expression
1395 const Twine &NameStr = "" ///< Name of the instruction
1397 Instruction::FCmp, pred, LHS, RHS, NameStr,
1398 InsertBefore) {
1399 AssertOK();
1400 }
1401
1402 /// Constructor with insert-at-end semantics.
1404 BasicBlock &InsertAtEnd, ///< Block to insert into.
1405 Predicate pred, ///< The predicate to use for the comparison
1406 Value *LHS, ///< The left-hand-side of the expression
1407 Value *RHS, ///< The right-hand-side of the expression
1408 const Twine &NameStr = "" ///< Name of the instruction
1410 Instruction::FCmp, pred, LHS, RHS, NameStr,
1411 &InsertAtEnd) {
1412 AssertOK();
1413 }
1414
1415 /// Constructor with no-insertion semantics
1417 Predicate Pred, ///< The predicate to use for the comparison
1418 Value *LHS, ///< The left-hand-side of the expression
1419 Value *RHS, ///< The right-hand-side of the expression
1420 const Twine &NameStr = "", ///< Name of the instruction
1421 Instruction *FlagsSource = nullptr
1422 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1423 RHS, NameStr, nullptr, FlagsSource) {
1424 AssertOK();
1425 }
1426
1427 /// @returns true if the predicate of this instruction is EQ or NE.
1428 /// Determine if this is an equality predicate.
1429 static bool isEquality(Predicate Pred) {
1430 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1431 Pred == FCMP_UNE;
1432 }
1433
1434 /// @returns true if the predicate of this instruction is EQ or NE.
1435 /// Determine if this is an equality predicate.
1436 bool isEquality() const { return isEquality(getPredicate()); }
1437
1438 /// @returns true if the predicate of this instruction is commutative.
1439 /// Determine if this is a commutative predicate.
1440 bool isCommutative() const {
1441 return isEquality() ||
1442 getPredicate() == FCMP_FALSE ||
1443 getPredicate() == FCMP_TRUE ||
1444 getPredicate() == FCMP_ORD ||
1446 }
1447
1448 /// @returns true if the predicate is relational (not EQ or NE).
1449 /// Determine if this a relational predicate.
1450 bool isRelational() const { return !isEquality(); }
1451
1452 /// Exchange the two operands to this instruction in such a way that it does
1453 /// not modify the semantics of the instruction. The predicate value may be
1454 /// changed to retain the same result if the predicate is order dependent
1455 /// (e.g. ult).
1456 /// Swap operands and adjust predicate.
1459 Op<0>().swap(Op<1>());
1460 }
1461
1462 /// Returns the sequence of all FCmp predicates.
1463 ///
1464 static auto predicates() { return FCmpPredicates(); }
1465
1466 /// Return result of `LHS Pred RHS` comparison.
1467 static bool compare(const APFloat &LHS, const APFloat &RHS,
1468 FCmpInst::Predicate Pred);
1469
1470 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1471 static bool classof(const Instruction *I) {
1472 return I->getOpcode() == Instruction::FCmp;
1473 }
1474 static bool classof(const Value *V) {
1475 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1476 }
1477};
1478
1479//===----------------------------------------------------------------------===//
1480/// This class represents a function call, abstracting a target
1481/// machine's calling convention. This class uses low bit of the SubClassData
1482/// field to indicate whether or not this is a tail call. The rest of the bits
1483/// hold the calling convention of the call.
1484///
1485class CallInst : public CallBase {
1486 CallInst(const CallInst &CI);
1487
1488 /// Construct a CallInst given a range of arguments.
1489 /// Construct a CallInst from a range of arguments
1490 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492 Instruction *InsertBefore);
1493
1494 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1495 const Twine &NameStr, Instruction *InsertBefore)
1496 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1497
1498 /// Construct a CallInst given a range of arguments.
1499 /// Construct a CallInst from a range of arguments
1500 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1501 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1502 BasicBlock *InsertAtEnd);
1503
1504 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1505 Instruction *InsertBefore);
1506
1507 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1508 BasicBlock *InsertAtEnd);
1509
1510 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1511 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1512 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1513
1514 /// Compute the number of operands to allocate.
1515 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1516 // We need one operand for the called function, plus the input operand
1517 // counts provided.
1518 return 1 + NumArgs + NumBundleInputs;
1519 }
1520
1521protected:
1522 // Note: Instruction needs to be a friend here to call cloneImpl.
1523 friend class Instruction;
1524
1525 CallInst *cloneImpl() const;
1526
1527public:
1528 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1529 Instruction *InsertBefore = nullptr) {
1530 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1531 }
1532
1534 const Twine &NameStr,
1535 Instruction *InsertBefore = nullptr) {
1536 return new (ComputeNumOperands(Args.size()))
1537 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1538 }
1539
1541 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1542 const Twine &NameStr = "",
1543 Instruction *InsertBefore = nullptr) {
1544 const int NumOperands =
1545 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1546 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1547
1548 return new (NumOperands, DescriptorBytes)
1549 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1550 }
1551
1552 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1553 BasicBlock *InsertAtEnd) {
1554 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1555 }
1556
1558 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1559 return new (ComputeNumOperands(Args.size()))
1560 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1561 }
1562
1565 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1566 const int NumOperands =
1567 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1568 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1569
1570 return new (NumOperands, DescriptorBytes)
1571 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1572 }
1573
1574 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1575 Instruction *InsertBefore = nullptr) {
1576 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1577 InsertBefore);
1578 }
1579
1581 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1582 const Twine &NameStr = "",
1583 Instruction *InsertBefore = nullptr) {
1584 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1585 NameStr, InsertBefore);
1586 }
1587
1589 const Twine &NameStr,
1590 Instruction *InsertBefore = nullptr) {
1591 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1592 InsertBefore);
1593 }
1594
1595 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1596 BasicBlock *InsertAtEnd) {
1597 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1598 InsertAtEnd);
1599 }
1600
1602 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1603 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1604 InsertAtEnd);
1605 }
1606
1609 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1610 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1611 NameStr, InsertAtEnd);
1612 }
1613
1614 /// Create a clone of \p CI with a different set of operand bundles and
1615 /// insert it before \p InsertPt.
1616 ///
1617 /// The returned call instruction is identical \p CI in every way except that
1618 /// the operand bundles for the new instruction are set to the operand bundles
1619 /// in \p Bundles.
1621 Instruction *InsertPt = nullptr);
1622
1623 /// Generate the IR for a call to malloc:
1624 /// 1. Compute the malloc call's argument as the specified type's size,
1625 /// possibly multiplied by the array size if the array size is not
1626 /// constant 1.
1627 /// 2. Call malloc with that argument.
1628 /// 3. Bitcast the result of the malloc call to the specified type.
1629 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630 Type *AllocTy, Value *AllocSize,
1631 Value *ArraySize = nullptr,
1632 Function *MallocF = nullptr,
1633 const Twine &Name = "");
1634 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635 Type *AllocTy, Value *AllocSize,
1636 Value *ArraySize = nullptr,
1637 Function *MallocF = nullptr,
1638 const Twine &Name = "");
1639 static Instruction *
1640 CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, Type *AllocTy,
1641 Value *AllocSize, Value *ArraySize = nullptr,
1642 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1643 Function *MallocF = nullptr, const Twine &Name = "");
1644 static Instruction *
1645 CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, Type *AllocTy,
1646 Value *AllocSize, Value *ArraySize = nullptr,
1647 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1648 Function *MallocF = nullptr, const Twine &Name = "");
1649 /// Generate the IR for a call to the builtin free function.
1650 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1651 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1652 static Instruction *CreateFree(Value *Source,
1654 Instruction *InsertBefore);
1655 static Instruction *CreateFree(Value *Source,
1657 BasicBlock *InsertAtEnd);
1658
1659 // Note that 'musttail' implies 'tail'.
1660 enum TailCallKind : unsigned {
1667
1669 static_assert(
1670 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1671 "Bitfields must be contiguous");
1672
1674 return getSubclassData<TailCallKindField>();
1675 }
1676
1677 bool isTailCall() const {
1679 return Kind == TCK_Tail || Kind == TCK_MustTail;
1680 }
1681
1682 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1683
1684 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1685
1687 setSubclassData<TailCallKindField>(TCK);
1688 }
1689
1690 void setTailCall(bool IsTc = true) {
1692 }
1693
1694 /// Return true if the call can return twice
1695 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1696 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1697
1698 // Methods for support type inquiry through isa, cast, and dyn_cast:
1699 static bool classof(const Instruction *I) {
1700 return I->getOpcode() == Instruction::Call;
1701 }
1702 static bool classof(const Value *V) {
1703 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1704 }
1705
1706 /// Updates profile metadata by scaling it by \p S / \p T.
1708
1709private:
1710 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1711 // method so that subclasses cannot accidentally use it.
1712 template <typename Bitfield>
1713 void setSubclassData(typename Bitfield::Type Value) {
1714 Instruction::setSubclassData<Bitfield>(Value);
1715 }
1716};
1717
1718CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1719 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1720 BasicBlock *InsertAtEnd)
1721 : CallBase(Ty->getReturnType(), Instruction::Call,
1722 OperandTraits<CallBase>::op_end(this) -
1723 (Args.size() + CountBundleInputs(Bundles) + 1),
1724 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1725 InsertAtEnd) {
1726 init(Ty, Func, Args, Bundles, NameStr);
1727}
1728
1729CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1730 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1731 Instruction *InsertBefore)
1732 : CallBase(Ty->getReturnType(), Instruction::Call,
1733 OperandTraits<CallBase>::op_end(this) -
1734 (Args.size() + CountBundleInputs(Bundles) + 1),
1735 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1736 InsertBefore) {
1737 init(Ty, Func, Args, Bundles, NameStr);
1738}
1739
1740//===----------------------------------------------------------------------===//
1741// SelectInst Class
1742//===----------------------------------------------------------------------===//
1743
1744/// This class represents the LLVM 'select' instruction.
1745///
1746class SelectInst : public Instruction {
1747 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1748 Instruction *InsertBefore)
1749 : Instruction(S1->getType(), Instruction::Select,
1750 &Op<0>(), 3, InsertBefore) {
1751 init(C, S1, S2);
1752 setName(NameStr);
1753 }
1754
1755 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1756 BasicBlock *InsertAtEnd)
1757 : Instruction(S1->getType(), Instruction::Select,
1758 &Op<0>(), 3, InsertAtEnd) {
1759 init(C, S1, S2);
1760 setName(NameStr);
1761 }
1762
1763 void init(Value *C, Value *S1, Value *S2) {
1764 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1765 Op<0>() = C;
1766 Op<1>() = S1;
1767 Op<2>() = S2;
1768 }
1769
1770protected:
1771 // Note: Instruction needs to be a friend here to call cloneImpl.
1772 friend class Instruction;
1773
1774 SelectInst *cloneImpl() const;
1775
1776public:
1777 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1778 const Twine &NameStr = "",
1779 Instruction *InsertBefore = nullptr,
1780 Instruction *MDFrom = nullptr) {
1781 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1782 if (MDFrom)
1783 Sel->copyMetadata(*MDFrom);
1784 return Sel;
1785 }
1786
1787 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1788 const Twine &NameStr,
1789 BasicBlock *InsertAtEnd) {
1790 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1791 }
1792
1793 const Value *getCondition() const { return Op<0>(); }
1794 const Value *getTrueValue() const { return Op<1>(); }
1795 const Value *getFalseValue() const { return Op<2>(); }
1796 Value *getCondition() { return Op<0>(); }
1797 Value *getTrueValue() { return Op<1>(); }
1798 Value *getFalseValue() { return Op<2>(); }
1799
1800 void setCondition(Value *V) { Op<0>() = V; }
1801 void setTrueValue(Value *V) { Op<1>() = V; }
1802 void setFalseValue(Value *V) { Op<2>() = V; }
1803
1804 /// Swap the true and false values of the select instruction.
1805 /// This doesn't swap prof metadata.
1806 void swapValues() { Op<1>().swap(Op<2>()); }
1807
1808 /// Return a string if the specified operands are invalid
1809 /// for a select operation, otherwise return null.
1810 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1811
1812 /// Transparently provide more efficient getOperand methods.
1814
1816 return static_cast<OtherOps>(Instruction::getOpcode());
1817 }
1818
1819 // Methods for support type inquiry through isa, cast, and dyn_cast:
1820 static bool classof(const Instruction *I) {
1821 return I->getOpcode() == Instruction::Select;
1822 }
1823 static bool classof(const Value *V) {
1824 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1825 }
1826};
1827
1828template <>
1829struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1830};
1831
1833
1834//===----------------------------------------------------------------------===//
1835// VAArgInst Class
1836//===----------------------------------------------------------------------===//
1837
1838/// This class represents the va_arg llvm instruction, which returns
1839/// an argument of the specified type given a va_list and increments that list
1840///
1842protected:
1843 // Note: Instruction needs to be a friend here to call cloneImpl.
1844 friend class Instruction;
1845
1846 VAArgInst *cloneImpl() const;
1847
1848public:
1849 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1850 Instruction *InsertBefore = nullptr)
1851 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1852 setName(NameStr);
1853 }
1854
1855 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1856 BasicBlock *InsertAtEnd)
1857 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1858 setName(NameStr);
1859 }
1860
1861 Value *getPointerOperand() { return getOperand(0); }
1862 const Value *getPointerOperand() const { return getOperand(0); }
1863 static unsigned getPointerOperandIndex() { return 0U; }
1864
1865 // Methods for support type inquiry through isa, cast, and dyn_cast:
1866 static bool classof(const Instruction *I) {
1867 return I->getOpcode() == VAArg;
1868 }
1869 static bool classof(const Value *V) {
1870 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1871 }
1872};
1873
1874//===----------------------------------------------------------------------===//
1875// ExtractElementInst Class
1876//===----------------------------------------------------------------------===//
1877
1878/// This instruction extracts a single (scalar)
1879/// element from a VectorType value
1880///
1882 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1883 Instruction *InsertBefore = nullptr);
1884 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1885 BasicBlock *InsertAtEnd);
1886
1887protected:
1888 // Note: Instruction needs to be a friend here to call cloneImpl.
1889 friend class Instruction;
1890
1892
1893public:
1895 const Twine &NameStr = "",
1896 Instruction *InsertBefore = nullptr) {
1897 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1898 }
1899
1901 const Twine &NameStr,
1902 BasicBlock *InsertAtEnd) {
1903 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1904 }
1905
1906 /// Return true if an extractelement instruction can be
1907 /// formed with the specified operands.
1908 static bool isValidOperands(const Value *Vec, const Value *Idx);
1909
1910 Value *getVectorOperand() { return Op<0>(); }
1911 Value *getIndexOperand() { return Op<1>(); }
1912 const Value *getVectorOperand() const { return Op<0>(); }
1913 const Value *getIndexOperand() const { return Op<1>(); }
1914
1916 return cast<VectorType>(getVectorOperand()->getType());
1917 }
1918
1919 /// Transparently provide more efficient getOperand methods.
1921
1922 // Methods for support type inquiry through isa, cast, and dyn_cast:
1923 static bool classof(const Instruction *I) {
1924 return I->getOpcode() == Instruction::ExtractElement;
1925 }
1926 static bool classof(const Value *V) {
1927 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1928 }
1929};
1930
1931template <>
1933 public FixedNumOperandTraits<ExtractElementInst, 2> {
1934};
1935
1937
1938//===----------------------------------------------------------------------===//
1939// InsertElementInst Class
1940//===----------------------------------------------------------------------===//
1941
1942/// This instruction inserts a single (scalar)
1943/// element into a VectorType value
1944///
1946 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1947 const Twine &NameStr = "",
1948 Instruction *InsertBefore = nullptr);
1949 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1950 BasicBlock *InsertAtEnd);
1951
1952protected:
1953 // Note: Instruction needs to be a friend here to call cloneImpl.
1954 friend class Instruction;
1955
1956 InsertElementInst *cloneImpl() const;
1957
1958public:
1959 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1960 const Twine &NameStr = "",
1961 Instruction *InsertBefore = nullptr) {
1962 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1963 }
1964
1965 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1966 const Twine &NameStr,
1967 BasicBlock *InsertAtEnd) {
1968 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1969 }
1970
1971 /// Return true if an insertelement instruction can be
1972 /// formed with the specified operands.
1973 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1974 const Value *Idx);
1975
1976 /// Overload to return most specific vector type.
1977 ///
1979 return cast<VectorType>(Instruction::getType());
1980 }
1981
1982 /// Transparently provide more efficient getOperand methods.
1984
1985 // Methods for support type inquiry through isa, cast, and dyn_cast:
1986 static bool classof(const Instruction *I) {
1987 return I->getOpcode() == Instruction::InsertElement;
1988 }
1989 static bool classof(const Value *V) {
1990 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1991 }
1992};
1993
1994template <>
1996 public FixedNumOperandTraits<InsertElementInst, 3> {
1997};
1998
2000
//===----------------------------------------------------------------------===//
//                           ShuffleVectorInst Class
//===----------------------------------------------------------------------===//
2004
2005constexpr int UndefMaskElem = -1;
2006
2007/// This instruction constructs a fixed permutation of two
2008/// input vectors.
2009///
2010/// For each element of the result vector, the shuffle mask selects an element
2011/// from one of the input vectors to copy to the result. Non-negative elements
2012/// in the mask represent an index into the concatenated pair of input vectors.
2013/// UndefMaskElem (-1) specifies that the result element is undefined.
2014///
2015/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2016/// requirement may be relaxed in the future.
2018 SmallVector<int, 4> ShuffleMask;
2019 Constant *ShuffleMaskForBitcode;
2020
2021protected:
2022 // Note: Instruction needs to be a friend here to call cloneImpl.
2023 friend class Instruction;
2024
2025 ShuffleVectorInst *cloneImpl() const;
2026
2027public:
2028 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2029 Instruction *InsertBefore = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2031 BasicBlock *InsertAtEnd);
2032 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2033 Instruction *InsertBefore = nullptr);
2034 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2035 BasicBlock *InsertAtEnd);
2036 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2037 const Twine &NameStr = "",
2038 Instruction *InsertBefor = nullptr);
2039 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2040 const Twine &NameStr, BasicBlock *InsertAtEnd);
2042 const Twine &NameStr = "",
2043 Instruction *InsertBefor = nullptr);
2045 const Twine &NameStr, BasicBlock *InsertAtEnd);
2046
2047 void *operator new(size_t S) { return User::operator new(S, 2); }
2048 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2049
2050 /// Swap the operands and adjust the mask to preserve the semantics
2051 /// of the instruction.
2052 void commute();
2053
2054 /// Return true if a shufflevector instruction can be
2055 /// formed with the specified operands.
2056 static bool isValidOperands(const Value *V1, const Value *V2,
2057 const Value *Mask);
2058 static bool isValidOperands(const Value *V1, const Value *V2,
2059 ArrayRef<int> Mask);
2060
2061 /// Overload to return most specific vector type.
2062 ///
2064 return cast<VectorType>(Instruction::getType());
2065 }
2066
2067 /// Transparently provide more efficient getOperand methods.
2069
2070 /// Return the shuffle mask value of this instruction for the given element
2071 /// index. Return UndefMaskElem if the element is undef.
2072 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2073
2074 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2075 /// elements of the mask are returned as UndefMaskElem.
2076 static void getShuffleMask(const Constant *Mask,
2077 SmallVectorImpl<int> &Result);
2078
2079 /// Return the mask for this instruction as a vector of integers. Undefined
2080 /// elements of the mask are returned as UndefMaskElem.
2082 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2083 }
2084
2085 /// Return the mask for this instruction, for use in bitcode.
2086 ///
2087 /// TODO: This is temporary until we decide a new bitcode encoding for
2088 /// shufflevector.
2089 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2090
2091 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2092 Type *ResultTy);
2093
2094 void setShuffleMask(ArrayRef<int> Mask);
2095
2096 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2097
2098 /// Return true if this shuffle returns a vector with a different number of
2099 /// elements than its source vectors.
2100 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2101 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2102 bool changesLength() const {
2103 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2104 ->getElementCount()
2105 .getKnownMinValue();
2106 unsigned NumMaskElts = ShuffleMask.size();
2107 return NumSourceElts != NumMaskElts;
2108 }
2109
2110 /// Return true if this shuffle returns a vector with a greater number of
2111 /// elements than its source vectors.
2112 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2113 bool increasesLength() const {
2114 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2115 ->getElementCount()
2116 .getKnownMinValue();
2117 unsigned NumMaskElts = ShuffleMask.size();
2118 return NumSourceElts < NumMaskElts;
2119 }
2120
2121 /// Return true if this shuffle mask chooses elements from exactly one source
2122 /// vector.
2123 /// Example: <7,5,undef,7>
2124 /// This assumes that vector operands are the same length as the mask.
2125 static bool isSingleSourceMask(ArrayRef<int> Mask);
2126 static bool isSingleSourceMask(const Constant *Mask) {
2127 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2128 SmallVector<int, 16> MaskAsInts;
2129 getShuffleMask(Mask, MaskAsInts);
2130 return isSingleSourceMask(MaskAsInts);
2131 }
2132
2133 /// Return true if this shuffle chooses elements from exactly one source
2134 /// vector without changing the length of that vector.
2135 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2136 /// TODO: Optionally allow length-changing shuffles.
2137 bool isSingleSource() const {
2138 return !changesLength() && isSingleSourceMask(ShuffleMask);
2139 }
2140
2141 /// Return true if this shuffle mask chooses elements from exactly one source
2142 /// vector without lane crossings. A shuffle using this mask is not
2143 /// necessarily a no-op because it may change the number of elements from its
2144 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2145 /// Example: <undef,undef,2,3>
2146 static bool isIdentityMask(ArrayRef<int> Mask);
2147 static bool isIdentityMask(const Constant *Mask) {
2148 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2149
2150 // Not possible to express a shuffle mask for a scalable vector for this
2151 // case.
2152 if (isa<ScalableVectorType>(Mask->getType()))
2153 return false;
2154
2155 SmallVector<int, 16> MaskAsInts;
2156 getShuffleMask(Mask, MaskAsInts);
2157 return isIdentityMask(MaskAsInts);
2158 }
2159
2160 /// Return true if this shuffle chooses elements from exactly one source
2161 /// vector without lane crossings and does not change the number of elements
2162 /// from its input vectors.
2163 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2164 bool isIdentity() const {
2165 // Not possible to express a shuffle mask for a scalable vector for this
2166 // case.
2167 if (isa<ScalableVectorType>(getType()))
2168 return false;
2169
2170 return !changesLength() && isIdentityMask(ShuffleMask);
2171 }
2172
2173 /// Return true if this shuffle lengthens exactly one source vector with
2174 /// undefs in the high elements.
2175 bool isIdentityWithPadding() const;
2176
2177 /// Return true if this shuffle extracts the first N elements of exactly one
2178 /// source vector.
2179 bool isIdentityWithExtract() const;
2180
2181 /// Return true if this shuffle concatenates its 2 source vectors. This
2182 /// returns false if either input is undefined. In that case, the shuffle is
2183 /// is better classified as an identity with padding operation.
2184 bool isConcat() const;
2185
2186 /// Return true if this shuffle mask chooses elements from its source vectors
2187 /// without lane crossings. A shuffle using this mask would be
2188 /// equivalent to a vector select with a constant condition operand.
2189 /// Example: <4,1,6,undef>
2190 /// This returns false if the mask does not choose from both input vectors.
2191 /// In that case, the shuffle is better classified as an identity shuffle.
2192 /// This assumes that vector operands are the same length as the mask
2193 /// (a length-changing shuffle can never be equivalent to a vector select).
2194 static bool isSelectMask(ArrayRef<int> Mask);
2195 static bool isSelectMask(const Constant *Mask) {
2196 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2197 SmallVector<int, 16> MaskAsInts;
2198 getShuffleMask(Mask, MaskAsInts);
2199 return isSelectMask(MaskAsInts);
2200 }
2201
2202 /// Return true if this shuffle chooses elements from its source vectors
2203 /// without lane crossings and all operands have the same number of elements.
2204 /// In other words, this shuffle is equivalent to a vector select with a
2205 /// constant condition operand.
2206 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2207 /// This returns false if the mask does not choose from both input vectors.
2208 /// In that case, the shuffle is better classified as an identity shuffle.
2209 /// TODO: Optionally allow length-changing shuffles.
2210 bool isSelect() const {
2211 return !changesLength() && isSelectMask(ShuffleMask);
2212 }
2213
2214 /// Return true if this shuffle mask swaps the order of elements from exactly
2215 /// one source vector.
2216 /// Example: <7,6,undef,4>
2217 /// This assumes that vector operands are the same length as the mask.
2218 static bool isReverseMask(ArrayRef<int> Mask);
2219 static bool isReverseMask(const Constant *Mask) {
2220 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2221 SmallVector<int, 16> MaskAsInts;
2222 getShuffleMask(Mask, MaskAsInts);
2223 return isReverseMask(MaskAsInts);
2224 }
2225
2226 /// Return true if this shuffle swaps the order of elements from exactly
2227 /// one source vector.
2228 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2229 /// TODO: Optionally allow length-changing shuffles.
2230 bool isReverse() const {
2231 return !changesLength() && isReverseMask(ShuffleMask);
2232 }
2233
2234 /// Return true if this shuffle mask chooses all elements with the same value
2235 /// as the first element of exactly one source vector.
2236 /// Example: <4,undef,undef,4>
2237 /// This assumes that vector operands are the same length as the mask.
2238 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2239 static bool isZeroEltSplatMask(const Constant *Mask) {
2240 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2241 SmallVector<int, 16> MaskAsInts;
2242 getShuffleMask(Mask, MaskAsInts);
2243 return isZeroEltSplatMask(MaskAsInts);
2244 }
2245
2246 /// Return true if all elements of this shuffle are the same value as the
2247 /// first element of exactly one source vector without changing the length
2248 /// of that vector.
2249 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2250 /// TODO: Optionally allow length-changing shuffles.
2251 /// TODO: Optionally allow splats from other elements.
2252 bool isZeroEltSplat() const {
2253 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2254 }
2255
2256 /// Return true if this shuffle mask is a transpose mask.
2257 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2258 /// even- or odd-numbered vector elements from two n-dimensional source
2259 /// vectors and write each result into consecutive elements of an
2260 /// n-dimensional destination vector. Two shuffles are necessary to complete
2261 /// the transpose, one for the even elements and another for the odd elements.
2262 /// This description closely follows how the TRN1 and TRN2 AArch64
2263 /// instructions operate.
2264 ///
2265 /// For example, a simple 2x2 matrix can be transposed with:
2266 ///
2267 /// ; Original matrix
2268 /// m0 = < a, b >
2269 /// m1 = < c, d >
2270 ///
2271 /// ; Transposed matrix
2272 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2273 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2274 ///
2275 /// For matrices having greater than n columns, the resulting nx2 transposed
2276 /// matrix is stored in two result vectors such that one vector contains
2277 /// interleaved elements from all the even-numbered rows and the other vector
2278 /// contains interleaved elements from all the odd-numbered rows. For example,
2279 /// a 2x4 matrix can be transposed with:
2280 ///
2281 /// ; Original matrix
2282 /// m0 = < a, b, c, d >
2283 /// m1 = < e, f, g, h >
2284 ///
2285 /// ; Transposed matrix
2286 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2287 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2288 static bool isTransposeMask(ArrayRef<int> Mask);
2289 static bool isTransposeMask(const Constant *Mask) {
2290 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2291 SmallVector<int, 16> MaskAsInts;
2292 getShuffleMask(Mask, MaskAsInts);
2293 return isTransposeMask(MaskAsInts);
2294 }
2295
2296 /// Return true if this shuffle transposes the elements of its inputs without
2297 /// changing the length of the vectors. This operation may also be known as a
2298 /// merge or interleave. See the description for isTransposeMask() for the
2299 /// exact specification.
2300 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2301 bool isTranspose() const {
2302 return !changesLength() && isTransposeMask(ShuffleMask);
2303 }
2304
2305 /// Return true if this shuffle mask is a splice mask, concatenating the two
2306 /// inputs together and then extracts an original width vector starting from
2307 /// the splice index.
2308 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2309 static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2310 static bool isSpliceMask(const Constant *Mask, int &Index) {
2311 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2312 SmallVector<int, 16> MaskAsInts;
2313 getShuffleMask(Mask, MaskAsInts);
2314 return isSpliceMask(MaskAsInts, Index);
2315 }
2316
2317 /// Return true if this shuffle splices two inputs without changing the length
2318 /// of the vectors. This operation concatenates the two inputs together and
2319 /// then extracts an original width vector starting from the splice index.
2320 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2321 bool isSplice(int &Index) const {
2322 return !changesLength() && isSpliceMask(ShuffleMask, Index);
2323 }
2324
2325 /// Return true if this shuffle mask is an extract subvector mask.
2326 /// A valid extract subvector mask returns a smaller vector from a single
2327 /// source operand. The base extraction index is returned as well.
2328 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2329 int &Index);
2330 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2331 int &Index) {
2332 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2333 // Not possible to express a shuffle mask for a scalable vector for this
2334 // case.
2335 if (isa<ScalableVectorType>(Mask->getType()))
2336 return false;
2337 SmallVector<int, 16> MaskAsInts;
2338 getShuffleMask(Mask, MaskAsInts);
2339 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2340 }
2341
2342 /// Return true if this shuffle mask is an extract subvector mask.
2344 // Not possible to express a shuffle mask for a scalable vector for this
2345 // case.
2346 if (isa<ScalableVectorType>(getType()))
2347 return false;
2348
2349 int NumSrcElts =
2350 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2351 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2352 }
2353
2354 /// Return true if this shuffle mask is an insert subvector mask.
2355 /// A valid insert subvector mask inserts the lowest elements of a second
2356 /// source operand into an in-place first source operand operand.
2357 /// Both the sub vector width and the insertion index is returned.
2358 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2359 int &NumSubElts, int &Index);
2360 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2361 int &NumSubElts, int &Index) {
2362 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2363 // Not possible to express a shuffle mask for a scalable vector for this
2364 // case.
2365 if (isa<ScalableVectorType>(Mask->getType()))
2366 return false;
2367 SmallVector<int, 16> MaskAsInts;
2368 getShuffleMask(Mask, MaskAsInts);
2369 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2370 }
2371
2372 /// Return true if this shuffle mask is an insert subvector mask.
2373 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2374 // Not possible to express a shuffle mask for a scalable vector for this
2375 // case.
2376 if (isa<ScalableVectorType>(getType()))
2377 return false;
2378
2379 int NumSrcElts =
2380 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2381 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2382 }
2383
2384 /// Return true if this shuffle mask replicates each of the \p VF elements
2385 /// in a vector \p ReplicationFactor times.
2386 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2387 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2388 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2389 int &VF);
2390 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2391 int &VF) {
2392 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2393 // Not possible to express a shuffle mask for a scalable vector for this
2394 // case.
2395 if (isa<ScalableVectorType>(Mask->getType()))
2396 return false;
2397 SmallVector<int, 16> MaskAsInts;
2398 getShuffleMask(Mask, MaskAsInts);
2399 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2400 }
2401
2402 /// Return true if this shuffle mask is a replication mask.
2403 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2404
2405 /// Return true if this shuffle mask represents "clustered" mask of size VF,
2406 /// i.e. each index between [0..VF) is used exactly once in each submask of
2407 /// size VF.
2408 /// For example, the mask for \p VF=4 is:
2409 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2410 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2411 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2412 /// element 3 is used twice in the second submask
2413 /// (3,3,1,0) and index 2 is not used at all.
2414 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2415
2416 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2417 /// mask.
2418 bool isOneUseSingleSourceMask(int VF) const;
2419
2420 /// Change values in a shuffle permute mask assuming the two vector operands
2421 /// of length InVecNumElts have swapped position.
2423 unsigned InVecNumElts) {
2424 for (int &Idx : Mask) {
2425 if (Idx == -1)
2426 continue;
2427 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2428 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2429 "shufflevector mask index out of range");
2430 }
2431 }
2432
2433 // Methods for support type inquiry through isa, cast, and dyn_cast:
2434 static bool classof(const Instruction *I) {
2435 return I->getOpcode() == Instruction::ShuffleVector;
2436 }
2437 static bool classof(const Value *V) {
2438 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2439 }
2440};
2441
2442template <>
2444 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2445
2447
//===----------------------------------------------------------------------===//
//                                ExtractValueInst Class
//===----------------------------------------------------------------------===//
2451
2452/// This instruction extracts a struct member or array
2453/// element value from an aggregate value.
2454///
2457
2459
2460 /// Constructors - Create a extractvalue instruction with a base aggregate
2461 /// value and a list of indices. The first ctor can optionally insert before
2462 /// an existing instruction, the second appends the new instruction to the
2463 /// specified BasicBlock.
2464 inline ExtractValueInst(Value *Agg,
2465 ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 Instruction *InsertBefore);
2468 inline ExtractValueInst(Value *Agg,
2469 ArrayRef<unsigned> Idxs,
2470 const Twine &NameStr, BasicBlock *InsertAtEnd);
2471
2472 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2473
2474protected:
2475 // Note: Instruction needs to be a friend here to call cloneImpl.
2476 friend class Instruction;
2477
2478 ExtractValueInst *cloneImpl() const;
2479
2480public:
2482 ArrayRef<unsigned> Idxs,
2483 const Twine &NameStr = "",
2484 Instruction *InsertBefore = nullptr) {
2485 return new
2486 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2487 }
2488
2490 ArrayRef<unsigned> Idxs,
2491 const Twine &NameStr,
2492 BasicBlock *InsertAtEnd) {
2493 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2494 }
2495
2496 /// Returns the type of the element that would be extracted
2497 /// with an extractvalue instruction with the specified parameters.
2498 ///
2499 /// Null is returned if the indices are invalid for the specified type.
2500 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2501
2502 using idx_iterator = const unsigned*;
2503
2504 inline idx_iterator idx_begin() const { return Indices.begin(); }
2505 inline idx_iterator idx_end() const { return Indices.end(); }
2507 return make_range(idx_begin(), idx_end());
2508 }
2509
2511 return getOperand(0);
2512 }
2514 return getOperand(0);
2515 }
2516 static unsigned getAggregateOperandIndex() {
2517 return 0U; // get index for modifying correct operand
2518 }
2519
2521 return Indices;
2522 }
2523
2524 unsigned getNumIndices() const {
2525 return (unsigned)Indices.size();
2526 }
2527
2528 bool hasIndices() const {
2529 return true;
2530 }
2531
2532 // Methods for support type inquiry through isa, cast, and dyn_cast:
2533 static bool classof(const Instruction *I) {
2534 return I->getOpcode() == Instruction::ExtractValue;
2535 }
2536 static bool classof(const Value *V) {
2537 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2538 }
2539};
2540
2541ExtractValueInst::ExtractValueInst(Value *Agg,
2542 ArrayRef<unsigned> Idxs,
2543 const Twine &NameStr,
2544 Instruction *InsertBefore)
2545 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2546 ExtractValue, Agg, InsertBefore) {
2547 init(Idxs, NameStr);
2548}
2549
2550ExtractValueInst::ExtractValueInst(Value *Agg,
2551 ArrayRef<unsigned> Idxs,
2552 const Twine &NameStr,
2553 BasicBlock *InsertAtEnd)
2554 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2555 ExtractValue, Agg, InsertAtEnd) {
2556 init(Idxs, NameStr);
2557}
2558
//===----------------------------------------------------------------------===//
//                                InsertValueInst Class
//===----------------------------------------------------------------------===//
2562
2563/// This instruction inserts a struct field of array element
2564/// value into an aggregate value.
2565///
2568
2569 InsertValueInst(const InsertValueInst &IVI);
2570
2571 /// Constructors - Create a insertvalue instruction with a base aggregate
2572 /// value, a value to insert, and a list of indices. The first ctor can
2573 /// optionally insert before an existing instruction, the second appends
2574 /// the new instruction to the specified BasicBlock.
2575 inline InsertValueInst(Value *Agg, Value *Val,
2576 ArrayRef<unsigned> Idxs,
2577 const Twine &NameStr,
2578 Instruction *InsertBefore);
2579 inline InsertValueInst(Value *Agg, Value *Val,
2580 ArrayRef<unsigned> Idxs,
2581 const Twine &NameStr, BasicBlock *InsertAtEnd);
2582
2583 /// Constructors - These two constructors are convenience methods because one
2584 /// and two index insertvalue instructions are so common.
2585 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2586 const Twine &NameStr = "",
2587 Instruction *InsertBefore = nullptr);
2588 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2589 BasicBlock *InsertAtEnd);
2590
2591 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2592 const Twine &NameStr);
2593
2594protected:
2595 // Note: Instruction needs to be a friend here to call cloneImpl.
2596 friend class Instruction;
2597
2598 InsertValueInst *cloneImpl() const;
2599
2600public:
2601 // allocate space for exactly two operands
2602 void *operator new(size_t S) { return User::operator new(S, 2); }
2603 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2604
2605 static InsertValueInst *Create(Value *Agg, Value *Val,
2606 ArrayRef<unsigned> Idxs,
2607 const Twine &NameStr = "",
2608 Instruction *InsertBefore = nullptr) {
2609 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2610 }
2611
2612 static InsertValueInst *Create(Value *Agg, Value *Val,
2613 ArrayRef<unsigned> Idxs,
2614 const Twine &NameStr,
2615 BasicBlock *InsertAtEnd) {
2616 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2617 }
2618
2619 /// Transparently provide more efficient getOperand methods.
2621
2622 using idx_iterator = const unsigned*;
2623
2624 inline idx_iterator idx_begin() const { return Indices.begin(); }
2625 inline idx_iterator idx_end() const { return Indices.end(); }
2627 return make_range(idx_begin(), idx_end());
2628 }
2629
2631 return getOperand(0);
2632 }
2634 return getOperand(0);
2635 }
2636 static unsigned getAggregateOperandIndex() {
2637 return 0U; // get index for modifying correct operand
2638 }
2639
2641 return getOperand(1);
2642 }
2644 return getOperand(1);
2645 }
2647 return 1U; // get index for modifying correct operand
2648 }
2649
2651 return Indices;
2652 }
2653
2654 unsigned getNumIndices() const {
2655 return (unsigned)Indices.size();
2656 }
2657
2658 bool hasIndices() const {
2659 return true;
2660 }
2661
2662 // Methods for support type inquiry through isa, cast, and dyn_cast:
2663 static bool classof(const Instruction *I) {
2664 return I->getOpcode() == Instruction::InsertValue;
2665 }
2666 static bool classof(const Value *V) {
2667 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2668 }
2669};
2670
2671template <>
2673 public FixedNumOperandTraits<InsertValueInst, 2> {
2674};
2675
2676InsertValueInst::InsertValueInst(Value *Agg,
2677 Value *Val,
2678 ArrayRef<unsigned> Idxs,
2679 const Twine &NameStr,
2680 Instruction *InsertBefore)
2681 : Instruction(Agg->getType(), InsertValue,
2682 OperandTraits<InsertValueInst>::op_begin(this),
2683 2, InsertBefore) {
2684 init(Agg, Val, Idxs, NameStr);
2685}
2686
2687InsertValueInst::InsertValueInst(Value *Agg,
2688 Value *Val,
2689 ArrayRef<unsigned> Idxs,
2690 const Twine &NameStr,
2691 BasicBlock *InsertAtEnd)
2692 : Instruction(Agg->getType(), InsertValue,
2693 OperandTraits<InsertValueInst>::op_begin(this),
2694 2, InsertAtEnd) {
2695 init(Agg, Val, Idxs, NameStr);
2696}
2697
2698DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2699
//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//
2703
2704// PHINode - The PHINode class is used to represent the magical mystical PHI
2705// node, that can not exist in nature, but can be synthesized in a computer
2706// scientist's overactive imagination.
2707//
2708class PHINode : public Instruction {
2709 /// The number of operands actually allocated. NumOperands is
2710 /// the number actually in use.
2711 unsigned ReservedSpace;
2712
2713 PHINode(const PHINode &PN);
2714
2715 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2716 const Twine &NameStr = "",
2717 Instruction *InsertBefore = nullptr)
2718 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2719 ReservedSpace(NumReservedValues) {
2720 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2721 setName(NameStr);
2722 allocHungoffUses(ReservedSpace);
2723 }
2724
2725 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2726 BasicBlock *InsertAtEnd)
2727 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2728 ReservedSpace(NumReservedValues) {
2729 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2730 setName(NameStr);
2731 allocHungoffUses(ReservedSpace);
2732 }
2733
2734protected:
2735 // Note: Instruction needs to be a friend here to call cloneImpl.
2736 friend class Instruction;
2737
2738 PHINode *cloneImpl() const;
2739
2740 // allocHungoffUses - this is more complicated than the generic
2741 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2742 // values and pointers to the incoming blocks, all in one allocation.
2743 void allocHungoffUses(unsigned N) {
2744 User::allocHungoffUses(N, /* IsPhi */ true);
2745 }
2746
2747public:
2748 /// Constructors - NumReservedValues is a hint for the number of incoming
2749 /// edges that this phi node will have (use 0 if you really have no idea).
2750 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2751 const Twine &NameStr = "",
2752 Instruction *InsertBefore = nullptr) {
2753 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2754 }
2755
2756 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2757 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2758 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2759 }
2760
2761 /// Provide fast operand accessors
2763
2764 // Block iterator interface. This provides access to the list of incoming
2765 // basic blocks, which parallels the list of incoming values.
2766 // Please note that we are not providing non-const iterators for blocks to
2767 // force all updates go through an interface function.
2768
2771
2773 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2774 }
2775
2777 return block_begin() + getNumOperands();
2778 }
2779
2781 return make_range(block_begin(), block_end());
2782 }
2783
2784 op_range incoming_values() { return operands(); }
2785
2786 const_op_range incoming_values() const { return operands(); }
2787
2788 /// Return the number of incoming edges
2789 ///
2790 unsigned getNumIncomingValues() const { return getNumOperands(); }
2791
2792 /// Return incoming value number x
2793 ///
2794 Value *getIncomingValue(unsigned i) const {
2795 return getOperand(i);
2796 }
2797 void setIncomingValue(unsigned i, Value *V) {
2798 assert(V && "PHI node got a null value!");
2799 assert(getType() == V->getType() &&
2800 "All operands to PHI node must be the same type as the PHI node!");
2801 setOperand(i, V);
2802 }
2803
2804 static unsigned getOperandNumForIncomingValue(unsigned i) {
2805 return i;
2806 }
2807
2808 static unsigned getIncomingValueNumForOperand(unsigned i) {
2809 return i;
2810 }
2811
2812 /// Return incoming basic block number @p i.
2813 ///
2814 BasicBlock *getIncomingBlock(unsigned i) const {
2815 return block_begin()[i];
2816 }
2817
2818 /// Return incoming basic block corresponding
2819 /// to an operand of the PHI.
2820 ///
2822 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2823 return getIncomingBlock(unsigned(&U - op_begin()));
2824 }
2825
2826 /// Return incoming basic block corresponding
2827 /// to value use iterator.
2828 ///
2830 return getIncomingBlock(I.getUse());
2831 }
2832
2833 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2834 const_cast<block_iterator>(block_begin())[i] = BB;
2835 }
2836
2837 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2838 /// of this PHINode, starting at \p ToIdx.
2840 uint32_t ToIdx = 0) {
2841 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2842 }
2843
2844 /// Replace every incoming basic block \p Old to basic block \p New.
2846 assert(New && Old && "PHI node got a null basic block!");
2847 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2848 if (getIncomingBlock(Op) == Old)
2849 setIncomingBlock(Op, New);
2850 }
2851
2852 /// Add an incoming value to the end of the PHI list
2853 ///
2855 if (getNumOperands() == ReservedSpace)
2856 growOperands(); // Get more space!
2857 // Initialize some new operands.
2858 setNumHungOffUseOperands(getNumOperands() + 1);
2859 setIncomingValue(getNumOperands() - 1, V);
2860 setIncomingBlock(getNumOperands() - 1, BB);
2861 }
2862
2863 /// Remove an incoming value. This is useful if a
2864 /// predecessor basic block is deleted. The value removed is returned.
2865 ///
2866 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2867 /// is true), the PHI node is destroyed and any uses of it are replaced with
2868 /// dummy values. The only time there should be zero incoming values to a PHI
2869 /// node is when the block is dead, so this strategy is sound.
2870 ///
2871 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2872
2873 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2874 int Idx = getBasicBlockIndex(BB);
2875 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2876 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2877 }
2878
2879 /// Return the first index of the specified basic
2880 /// block in the value list for this PHI. Returns -1 if no instance.
2881 ///
2882 int getBasicBlockIndex(const BasicBlock *BB) const {
2883 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2884 if (block_begin()[i] == BB)
2885 return i;
2886 return -1;
2887 }
2888
2890 int Idx = getBasicBlockIndex(BB);
2891 assert(Idx >= 0 && "Invalid basic block argument!");
2892 return getIncomingValue(Idx);
2893 }
2894
2895 /// Set every incoming value(s) for block \p BB to \p V.
2897 assert(BB && "PHI node got a null basic block!");
2898 bool Found = false;
2899 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2900 if (getIncomingBlock(Op) == BB) {
2901 Found = true;
2902 setIncomingValue(Op, V);
2903 }
2904 (void)Found;
2905 assert(Found && "Invalid basic block argument to set!");
2906 }
2907
2908 /// If the specified PHI node always merges together the
2909 /// same value, return the value, otherwise return null.
2910 Value *hasConstantValue() const;
2911
2912 /// Whether the specified PHI node always merges
2913 /// together the same value, assuming undefs are equal to a unique
2914 /// non-undef value.
2915 bool hasConstantOrUndefValue() const;
2916
2917 /// If the PHI node is complete which means all of its parent's predecessors
2918 /// have incoming value in this PHI, return true, otherwise return false.
2919 bool isComplete() const {
2921 [this](const BasicBlock *Pred) {
2922 return getBasicBlockIndex(Pred) >= 0;
2923 });
2924 }
2925
2926 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2927 static bool classof(const Instruction *I) {
2928 return I->getOpcode() == Instruction::PHI;
2929 }
2930 static bool classof(const Value *V) {
2931 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2932 }
2933
2934private:
2935 void growOperands();
2936};
2937
2938template <>
2940};
2941
2943
2944//===----------------------------------------------------------------------===//
2945// LandingPadInst Class
2946//===----------------------------------------------------------------------===//
2947
2948//===---------------------------------------------------------------------------
2949/// The landingpad instruction holds all of the information
2950/// necessary to generate correct exception handling. The landingpad instruction
2951/// cannot be moved from the top of a landing pad block, which itself is
2952/// accessible only from the 'unwind' edge of an invoke. This uses the
2953/// SubclassData field in Value to store whether or not the landingpad is a
2954/// cleanup.
2955///
2957 using CleanupField = BoolBitfieldElementT<0>;
2958
2959 /// The number of operands actually allocated. NumOperands is
2960 /// the number actually in use.
2961 unsigned ReservedSpace;
2962
2963 LandingPadInst(const LandingPadInst &LP);
2964
2965public:
2967
2968private:
2969 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2970 const Twine &NameStr, Instruction *InsertBefore);
2971 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2972 const Twine &NameStr, BasicBlock *InsertAtEnd);
2973
2974 // Allocate space for exactly zero operands.
2975 void *operator new(size_t S) { return User::operator new(S); }
2976
2977 void growOperands(unsigned Size);
2978 void init(unsigned NumReservedValues, const Twine &NameStr);
2979
2980protected:
2981 // Note: Instruction needs to be a friend here to call cloneImpl.
2982 friend class Instruction;
2983
2984 LandingPadInst *cloneImpl() const;
2985
2986public:
2987 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2988
2989 /// Constructors - NumReservedClauses is a hint for the number of incoming
2990 /// clauses that this landingpad will have (use 0 if you really have no idea).
2991 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2992 const Twine &NameStr = "",
2993 Instruction *InsertBefore = nullptr);
2994 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2995 const Twine &NameStr, BasicBlock *InsertAtEnd);
2996
2997 /// Provide fast operand accessors
2999
3000 /// Return 'true' if this landingpad instruction is a
3001 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3002 /// doesn't catch the exception.
3003 bool isCleanup() const { return getSubclassData<CleanupField>(); }
3004
3005 /// Indicate that this landingpad instruction is a cleanup.
3006 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3007
3008 /// Add a catch or filter clause to the landing pad.
3009 void addClause(Constant *ClauseVal);
3010
3011 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3012 /// determine what type of clause this is.
3013 Constant *getClause(unsigned Idx) const {
3014 return cast<Constant>(getOperandList()[Idx]);
3015 }
3016
3017 /// Return 'true' if the clause and index Idx is a catch clause.
3018 bool isCatch(unsigned Idx) const {
3019 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3020 }
3021
3022 /// Return 'true' if the clause and index Idx is a filter clause.
3023 bool isFilter(unsigned Idx) const {
3024 return isa<ArrayType>(getOperandList()[Idx]->getType());
3025 }
3026
3027 /// Get the number of clauses for this landing pad.
3028 unsigned getNumClauses() const { return getNumOperands(); }
3029
3030 /// Grow the size of the operand list to accommodate the new
3031 /// number of clauses.
3032 void reserveClauses(unsigned Size) { growOperands(Size); }
3033
3034 // Methods for support type inquiry through isa, cast, and dyn_cast:
3035 static bool classof(const Instruction *I) {
3036 return I->getOpcode() == Instruction::LandingPad;
3037 }
3038 static bool classof(const Value *V) {
3039 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3040 }
3041};
3042
3043template <>
3045};
3046
3048
3049//===----------------------------------------------------------------------===//
3050// ReturnInst Class
3051//===----------------------------------------------------------------------===//
3052
3053//===---------------------------------------------------------------------------
3054/// Return a value (possibly void), from a function. Execution
3055/// does not continue in this function any longer.
3056///
3057class ReturnInst : public Instruction {
3058 ReturnInst(const ReturnInst &RI);
3059
3060private:
3061 // ReturnInst constructors:
3062 // ReturnInst() - 'ret void' instruction
3063 // ReturnInst( null) - 'ret void' instruction
3064 // ReturnInst(Value* X) - 'ret X' instruction
3065 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3066 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3067 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3068 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3069 //
3070 // NOTE: If the Value* passed is of type void then the constructor behaves as
3071 // if it was passed NULL.
3072 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3073 Instruction *InsertBefore = nullptr);
3074 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3075 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3076
3077protected:
3078 // Note: Instruction needs to be a friend here to call cloneImpl.
3079 friend class Instruction;
3080
3081 ReturnInst *cloneImpl() const;
3082
3083public:
3084 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3085 Instruction *InsertBefore = nullptr) {
3086 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3087 }
3088
3090 BasicBlock *InsertAtEnd) {
3091 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3092 }
3093
3094 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3095 return new(0) ReturnInst(C, InsertAtEnd);
3096 }
3097
3098 /// Provide fast operand accessors
3100
3101 /// Convenience accessor. Returns null if there is no return value.
3103 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3104 }
3105
3106 unsigned getNumSuccessors() const { return 0; }
3107
3108 // Methods for support type inquiry through isa, cast, and dyn_cast:
3109 static bool classof(const Instruction *I) {
3110 return (I->getOpcode() == Instruction::Ret);
3111 }
3112 static bool classof(const Value *V) {
3113 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3114 }
3115
3116private:
3117 BasicBlock *getSuccessor(unsigned idx) const {
3118 llvm_unreachable("ReturnInst has no successors!");
3119 }
3120
3121 void setSuccessor(unsigned idx, BasicBlock *B) {
3122 llvm_unreachable("ReturnInst has no successors!");
3123 }
3124};
3125
3126template <>
3127struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3128};
3129
3131
3132//===----------------------------------------------------------------------===//
3133// BranchInst Class
3134//===----------------------------------------------------------------------===//
3135
3136//===---------------------------------------------------------------------------
3137/// Conditional or Unconditional Branch instruction.
3138///
3139class BranchInst : public Instruction {
3140 /// Ops list - Branches are strange. The operands are ordered:
3141 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3142 /// they don't have to check for cond/uncond branchness. These are mostly
3143 /// accessed relative from op_end().
3144 BranchInst(const BranchInst &BI);
3145 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3146 // BranchInst(BB *B) - 'br B'
3147 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3148 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3149 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3150 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3151 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3152 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3153 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3154 Instruction *InsertBefore = nullptr);
3155 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3156 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3157 BasicBlock *InsertAtEnd);
3158
3159 void AssertOK();
3160
3161protected:
3162 // Note: Instruction needs to be a friend here to call cloneImpl.
3163 friend class Instruction;
3164
3165 BranchInst *cloneImpl() const;
3166
3167public:
3168 /// Iterator type that casts an operand to a basic block.
3169 ///
3170 /// This only makes sense because the successors are stored as adjacent
3171 /// operands for branch instructions.
3173 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3174 std::random_access_iterator_tag, BasicBlock *,
3175 ptrdiff_t, BasicBlock *, BasicBlock *> {
3177
3178 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3179 BasicBlock *operator->() const { return operator*(); }
3180 };
3181
3182 /// The const version of `succ_op_iterator`.
3184 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3185 std::random_access_iterator_tag,
3186 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3187 const BasicBlock *> {
3190
3191 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3192 const BasicBlock *operator->() const { return operator*(); }
3193 };
3194
3196 Instruction *InsertBefore = nullptr) {
3197 return new(1) BranchInst(IfTrue, InsertBefore);
3198 }
3199
3200 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3201 Value *Cond, Instruction *InsertBefore = nullptr) {
3202 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3203 }
3204
3205 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3206 return new(1) BranchInst(IfTrue, InsertAtEnd);
3207 }
3208
3209 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3210 Value *Cond, BasicBlock *InsertAtEnd) {
3211 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3212 }
3213
3214 /// Transparently provide more efficient getOperand methods.
3216
3217 bool isUnconditional() const { return getNumOperands() == 1; }
3218 bool isConditional() const { return getNumOperands() == 3; }
3219
3221 assert(isConditional() && "Cannot get condition of an uncond branch!");
3222 return Op<-3>();
3223 }
3224
3226 assert(isConditional() && "Cannot set condition of unconditional branch!");
3227 Op<-3>() = V;
3228 }
3229
3230 unsigned getNumSuccessors() const { return 1+isConditional(); }
3231
3232 BasicBlock *getSuccessor(unsigned i) const {
3233 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3234 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3235 }
3236
3237 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3238 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3239 *(&Op<-1>() - idx) = NewSucc;
3240 }
3241
3242 /// Swap the successors of this branch instruction.
3243 ///
3244 /// Swaps the successors of the branch instruction. This also swaps any
3245 /// branch weight metadata associated with the instruction so that it
3246 /// continues to map correctly to each operand.
3247 void swapSuccessors();
3248
3250 return make_range(
3251 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3252 succ_op_iterator(value_op_end()));
3253 }
3254
3257 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3258 const_succ_op_iterator(value_op_end()));
3259 }
3260
3261 // Methods for support type inquiry through isa, cast, and dyn_cast:
3262 static bool classof(const Instruction *I) {
3263 return (I->getOpcode() == Instruction::Br);
3264 }
3265 static bool classof(const Value *V) {
3266 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3267 }
3268};
3269
3270template <>
3271struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3272};
3273
3275
3276//===----------------------------------------------------------------------===//
3277// SwitchInst Class
3278//===----------------------------------------------------------------------===//
3279
3280//===---------------------------------------------------------------------------
3281/// Multiway switch
3282///
3283class SwitchInst : public Instruction {
3284 unsigned ReservedSpace;
3285
3286 // Operand[0] = Value to switch on
3287 // Operand[1] = Default basic block destination
3288 // Operand[2n ] = Value to match
3289 // Operand[2n+1] = BasicBlock to go to on match
3290 SwitchInst(const SwitchInst &SI);
3291
3292 /// Create a new switch instruction, specifying a value to switch on and a
3293 /// default destination. The number of additional cases can be specified here
3294 /// to make memory allocation more efficient. This constructor can also
3295 /// auto-insert before another instruction.
3296 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3297 Instruction *InsertBefore);
3298
3299 /// Create a new switch instruction, specifying a value to switch on and a
3300 /// default destination. The number of additional cases can be specified here
3301 /// to make memory allocation more efficient. This constructor also
3302 /// auto-inserts at the end of the specified BasicBlock.
3303 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3304 BasicBlock *InsertAtEnd);
3305
3306 // allocate space for exactly zero operands
3307 void *operator new(size_t S) { return User::operator new(S); }
3308
3309 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3310 void growOperands();
3311
3312protected:
3313 // Note: Instruction needs to be a friend here to call cloneImpl.
3314 friend class Instruction;
3315
3316 SwitchInst *cloneImpl() const;
3317
3318public:
3319 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3320
3321 // -2
3322 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3323
3324 template <typename CaseHandleT> class CaseIteratorImpl;
3325
3326 /// A handle to a particular switch case. It exposes a convenient interface
3327 /// to both the case value and the successor block.
3328 ///
3329 /// We define this as a template and instantiate it to form both a const and
3330 /// non-const handle.
3331 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3333 // Directly befriend both const and non-const iterators.
3334 friend class SwitchInst::CaseIteratorImpl<
3335 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3336
3337 protected:
3338 // Expose the switch type we're parameterized with to the iterator.
3339 using SwitchInstType = SwitchInstT;
3340
3341 SwitchInstT *SI;
3343
3344 CaseHandleImpl() = default;
3346
3347 public:
3348 /// Resolves case value for current case.
3349 ConstantIntT *getCaseValue() const {
3350 assert((unsigned)Index < SI->getNumCases() &&
3351 "Index out the number of cases.");
3352 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3353 }
3354
3355 /// Resolves successor for current case.
3356 BasicBlockT *getCaseSuccessor() const {
3357 assert(((unsigned)Index < SI->getNumCases() ||
3358 (unsigned)Index == DefaultPseudoIndex) &&
3359 "Index out the number of cases.");
3360 return SI->getSuccessor(getSuccessorIndex());
3361 }
3362
3363 /// Returns number of current case.
3364 unsigned getCaseIndex() const { return Index; }
3365
3366 /// Returns successor index for current case successor.
3367 unsigned getSuccessorIndex() const {
3368 assert(((unsigned)Index == DefaultPseudoIndex ||
3369 (unsigned)Index < SI->getNumCases()) &&
3370 "Index out the number of cases.");
3371 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3372 }
3373
3374 bool operator==(const CaseHandleImpl &RHS) const {
3375 assert(SI == RHS.SI && "Incompatible operators.");
3376 return Index == RHS.Index;
3377 }
3378 };
3379
3382
3384 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3386
3387 public:
3389
3390 /// Sets the new value for current case.
3391 void setValue(ConstantInt *V) const {
3392 assert((unsigned)Index < SI->getNumCases() &&
3393 "Index out the number of cases.");
3394 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3395 }
3396
3397 /// Sets the new successor for current case.
3398 void setSuccessor(BasicBlock *S) const {
3399 SI->setSuccessor(getSuccessorIndex(), S);
3400 }
3401 };
3402
3403 template <typename CaseHandleT>
3405 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3406 std::random_access_iterator_tag,
3407 const CaseHandleT> {
3408 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3409
3410 CaseHandleT Case;
3411
3412 public:
3413 /// Default constructed iterator is in an invalid state until assigned to
3414 /// a case for a particular switch.
3415 CaseIteratorImpl() = default;
3416
3417 /// Initializes case iterator for given SwitchInst and for given
3418 /// case number.
3419 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3420
3421 /// Initializes case iterator for given SwitchInst and for given
3422 /// successor index.
3424 unsigned SuccessorIndex) {
3425 assert(SuccessorIndex < SI->getNumSuccessors() &&
3426 "Successor index # out of range!");
3427 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3428 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3429 }
3430
3431 /// Support converting to the const variant. This will be a no-op for const
3432 /// variant.
3434 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3435 }
3436
3438 // Check index correctness after addition.
3439 // Note: Index == getNumCases() means end().
3440 assert(Case.Index + N >= 0 &&
3441 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3442 "Case.Index out the number of cases.");
3443 Case.Index += N;
3444 return *this;
3445 }
3447 // Check index correctness after subtraction.
3448 // Note: Case.Index == getNumCases() means end().
3449 assert(Case.Index - N >= 0 &&
3450 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3451 "Case.Index out the number of cases.");
3452 Case.Index -= N;
3453 return *this;
3454 }
3456 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3457 return Case.Index - RHS.Case.Index;
3458 }
3459 bool operator==(const CaseIteratorImpl &RHS) const {
3460 return Case == RHS.Case;
3461 }
3462 bool operator<(const CaseIteratorImpl &RHS) const {
3463 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3464 return Case.Index < RHS.Case.Index;
3465 }
3466 const CaseHandleT &operator*() const { return Case; }
3467 };
3468
3471
3473 unsigned NumCases,
3474 Instruction *InsertBefore = nullptr) {
3475 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3476 }
3477
3479 unsigned NumCases, BasicBlock *InsertAtEnd) {
3480 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3481 }
3482
3483 /// Provide fast operand accessors
3485
3486 // Accessor Methods for Switch stmt
3487 Value *getCondition() const { return getOperand(0); }
3488 void setCondition(Value *V) { setOperand(0, V); }
3489
3491 return cast<BasicBlock>(getOperand(1));
3492 }
3493
3494 void setDefaultDest(BasicBlock *DefaultCase) {
3495 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3496 }
3497
3498 /// Return the number of 'cases' in this switch instruction, excluding the
3499 /// default case.
3500 unsigned getNumCases() const {
3501 return getNumOperands()/2 - 1;
3502 }
3503
3504 /// Returns a read/write iterator that points to the first case in the
3505 /// SwitchInst.
3507 return CaseIt(this, 0);
3508 }
3509
3510 /// Returns a read-only iterator that points to the first case in the
3511 /// SwitchInst.
3513 return ConstCaseIt(this, 0);
3514 }
3515
3516 /// Returns a read/write iterator that points one past the last in the
3517 /// SwitchInst.
3519 return CaseIt(this, getNumCases());
3520 }
3521
3522 /// Returns a read-only iterator that points one past the last in the
3523 /// SwitchInst.
3525 return ConstCaseIt(this, getNumCases());
3526 }
3527
3528 /// Iteration adapter for range-for loops.
3530 return make_range(case_begin(), case_end());
3531 }
3532
3533 /// Constant iteration adapter for range-for loops.
3535 return make_range(case_begin(), case_end());
3536 }
3537
3538 /// Returns an iterator that points to the default case.
3539 /// Note: this iterator allows to resolve successor only. Attempt
3540 /// to resolve case value causes an assertion.
3541 /// Also note, that increment and decrement also causes an assertion and
3542 /// makes iterator invalid.
3544 return CaseIt(this, DefaultPseudoIndex);
3545 }
3547 return ConstCaseIt(this, DefaultPseudoIndex);
3548 }
3549
3550 /// Search all of the case values for the specified constant. If it is
3551 /// explicitly handled, return the case iterator of it, otherwise return
3552 /// default case iterator to indicate that it is handled by the default
3553 /// handler.
3555 return CaseIt(
3556 this,
3557 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3558 }
3560 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3561 return Case.getCaseValue() == C;
3562 });
3563 if (I != case_end())
3564 return I;
3565
3566 return case_default();
3567 }
3568
3569 /// Finds the unique case value for a given successor. Returns null if the
3570 /// successor is not found, not unique, or is the default case.
3572 if (BB == getDefaultDest())
3573 return nullptr;
3574
3575 ConstantInt *CI = nullptr;
3576 for (auto Case : cases()) {
3577 if (Case.getCaseSuccessor() != BB)
3578 continue;
3579
3580 if (CI)
3581 return nullptr; // Multiple cases lead to BB.
3582
3583 CI = Case.getCaseValue();
3584 }
3585
3586 return CI;
3587 }
3588
3589 /// Add an entry to the switch instruction.
3590 /// Note:
3591 /// This action invalidates case_end(). Old case_end() iterator will
3592 /// point to the added case.
3593 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3594
3595 /// This method removes the specified case and its successor from the switch
3596 /// instruction. Note that this operation may reorder the remaining cases at
3597 /// index idx and above.
3598 /// Note:
3599 /// This action invalidates iterators for all cases following the one removed,
3600 /// including the case_end() iterator. It returns an iterator for the next
3601 /// case.
3602 CaseIt removeCase(CaseIt I);
3603
3604 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3605 BasicBlock *getSuccessor(unsigned idx) const {
3606 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3607 return cast<BasicBlock>(getOperand(idx*2+1));
3608 }
3609 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3610 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3611 setOperand(idx * 2 + 1, NewSucc);
3612 }
3613
3614 // Methods for support type inquiry through isa, cast, and dyn_cast:
3615 static bool classof(const Instruction *I) {
3616 return I->getOpcode() == Instruction::Switch;
3617 }
3618 static bool classof(const Value *V) {
3619 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3620 }
3621};
3622
3623/// A wrapper class to simplify modification of SwitchInst cases along with
3624/// their prof branch_weights metadata.
3626 SwitchInst &SI;
3627 std::optional<SmallVector<uint32_t, 8>> Weights;
3628 bool Changed = false;
3629
3630protected:
3632
3633 void init();
3634
3635public:
3636 using CaseWeightOpt = std::optional<uint32_t>;
3637 SwitchInst *operator->() { return &SI; }
3638 SwitchInst &operator*() { return SI; }
3639 operator SwitchInst *() { return &SI; }
3640
3642
3644 if (Changed)
3645 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3646 }
3647
3648 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3649 /// correspondent branch weight.
3651
3652 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3653 /// specified branch weight for the added case.
3654 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3655
3656 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3657 /// this object to not touch the underlying SwitchInst in destructor.
3659
3660 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3661 CaseWeightOpt getSuccessorWeight(unsigned idx);
3662
3663 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3664};
3665
3666template <>
3668};
3669
3671
3672//===----------------------------------------------------------------------===//
3673// IndirectBrInst Class
3674//===----------------------------------------------------------------------===//
3675
3676//===---------------------------------------------------------------------------
3677/// Indirect Branch Instruction.
3678///
3680 unsigned ReservedSpace;
3681
3682 // Operand[0] = Address to jump to
3683 // Operand[n+1] = n-th destination
3684 IndirectBrInst(const IndirectBrInst &IBI);
3685
3686 /// Create a new indirectbr instruction, specifying an
3687 /// Address to jump to. The number of expected destinations can be specified
3688 /// here to make memory allocation more efficient. This constructor can also
3689 /// autoinsert before another instruction.
3690 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3691
3692 /// Create a new indirectbr instruction, specifying an
3693 /// Address to jump to. The number of expected destinations can be specified
3694 /// here to make memory allocation more efficient. This constructor also
3695 /// autoinserts at the end of the specified BasicBlock.
3696 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3697
3698 // allocate space for exactly zero operands
3699 void *operator new(size_t S) { return User::operator new(S); }
3700
3701 void init(Value *Address, unsigned NumDests);
3702 void growOperands();
3703
3704protected:
3705 // Note: Instruction needs to be a friend here to call cloneImpl.
3706 friend class Instruction;
3707
3708 IndirectBrInst *cloneImpl() const;
3709
3710public:
3711 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3712
3713 /// Iterator type that casts an operand to a basic block.
3714 ///
3715 /// This only makes sense because the successors are stored as adjacent
3716 /// operands for indirectbr instructions.
3718 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3719 std::random_access_iterator_tag, BasicBlock *,
3720 ptrdiff_t, BasicBlock *, BasicBlock *> {
3722
3723 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3724 BasicBlock *operator->() const { return operator*(); }
3725 };
3726
3727 /// The const version of `succ_op_iterator`.
3729 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3730 std::random_access_iterator_tag,
3731 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3732 const BasicBlock *> {
3735
3736 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3737 const BasicBlock *operator->() const { return operator*(); }
3738 };
3739
3740 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3741 Instruction *InsertBefore = nullptr) {
3742 return new IndirectBrInst(Address, NumDests, InsertBefore);
3743 }
3744
3745 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3746 BasicBlock *InsertAtEnd) {
3747 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3748 }
3749
  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  // Operand 0 is the computed jump address; operands 1..N are destinations.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    // Destinations start at operand 1; operand 0 is the address.
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }
3781
  /// Iterate over the successor blocks, i.e. operands 1..N (operand 0 is the
  /// jump address and is skipped via std::next).
  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  /// Const overload of successors().
  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3800
// IndirectBrInst keeps its operands in a hung-off use list (the destination
// count can grow), hence HungoffOperandTraits rather than a fixed count.
template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3807//===----------------------------------------------------------------------===//
3808// InvokeInst Class
3809//===----------------------------------------------------------------------===//
3810
3811/// Invoke instruction. The SubclassData field is used to hold the
3812/// calling convention of the call.
3813///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  ///
  /// Construct an InvokeInst from a range of arguments
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Shared constructor helper: sets callee, destinations, args and bundles.
  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  /// Create an invoke with no operand bundles, optionally inserted before
  /// \p InsertBefore.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertBefore);
  }

  /// Create an invoke with operand bundles; extra descriptor bytes are
  /// co-allocated to hold the bundle metadata.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  /// As above, appended to the end of \p InsertAtEnd.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertAtEnd);
  }

  // FunctionCallee convenience overloads: unpack the callee's type and value
  // and forward to the FunctionType-based factories above.
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, std::nullopt, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  // get*Dest - Return the destination basic blocks...
  // The destinations are stored at fixed negative offsets from the end of the
  // operand list (see NormalDestOpEndIdx / UnwindDestOpEndIdx above).
  BasicBlock *getNormalDest() const {
    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }

  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }

  // An invoke always has exactly two successors: normal and unwind.
  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
3994
// Inline constructor definition (InsertBefore form). The first-operand
// pointer is computed as op_end(this) - NumOperands, i.e. the operand array
// is expected to sit immediately before the instruction object (co-allocated
// by the placement operator new used in the Create factories).
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
4004
// Inline constructor definition (InsertAtEnd form); identical to the
// InsertBefore form except for the insertion point passed to CallBase.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
4014
4015//===----------------------------------------------------------------------===//
4016// CallBrInst Class
4017//===----------------------------------------------------------------------===//
4018
4019/// CallBr instruction, tracking function calls that may not return control but
4020/// instead transfer it to a third location. The SubclassData field is used to
4021/// hold the calling convention of the call.
4022///
4023class CallBrInst : public CallBase {
4024
4025 unsigned NumIndirectDests;
4026
4027 CallBrInst(const CallBrInst &BI);
4028
4029 /// Construct a CallBrInst given a range of arguments.
4030 ///
4031 /// Construct a CallBrInst from a range of arguments
4032 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4033 ArrayRef<BasicBlock *> IndirectDests,
4034 ArrayRef<Value *> Args,
4035 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4036 const Twine &NameStr, Instruction *InsertBefore);
4037
4038 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4039 ArrayRef<BasicBlock *> IndirectDests,
4040 ArrayRef<Value *> Args,
4041 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4042 const Twine &NameStr, BasicBlock *InsertAtEnd);
4043
4044 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4045 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4046 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4047
4048 /// Compute the number of operands to allocate.
4049 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4050 int NumBundleInputs = 0) {
4051 // We need one operand for the called function, plus our extra operands and
4052 // the input operand counts provided.
4053 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4054 }
4055
4056protected:
4057 // Note: Instruction needs to be a friend here to call cloneImpl.
4058 friend class Instruction;
4059
4060 CallBrInst *cloneImpl() const;
4061
4062public:
4064 BasicBlock *DefaultDest,
4065 ArrayRef<BasicBlock *> IndirectDests,
4066 ArrayRef<Value *> Args, const Twine &NameStr,
4067 Instruction *InsertBefore = nullptr) {
4068 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4069 return new (NumOperands)
4070 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4071 NumOperands, NameStr, InsertBefore);
4072 }
4073
4074 static CallBrInst *
4075 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4076 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4077 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4078 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4079 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4080 CountBundleInputs(Bundles));
4081 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4082
4083 return new (NumOperands, DescriptorBytes)
4084 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4085 NumOperands, NameStr, InsertBefore);
4086 }
4087
4089 BasicBlock *DefaultDest,
4090 ArrayRef<BasicBlock *> IndirectDests,
4091 ArrayRef<Value *> Args, const Twine &NameStr,
4092 BasicBlock *InsertAtEnd) {
4093 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4094 return new (NumOperands)
4095 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4096 NumOperands, NameStr, InsertAtEnd);
4097 }
4098
4100 BasicBlock *DefaultDest,
4101 ArrayRef<BasicBlock *> IndirectDests,
4102 ArrayRef<Value *> Args,
4104 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4105 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4106 CountBundleInputs(Bundles));
4107 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4108
4109 return new (NumOperands, DescriptorBytes)
4110 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4111 NumOperands, NameStr, InsertAtEnd);
4112 }
4113
4114 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4115 ArrayRef<BasicBlock *> IndirectDests,
4116 ArrayRef<Value *> Args, const Twine &NameStr,
4117 Instruction *InsertBefore = nullptr) {
4118 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4119 IndirectDests, Args, NameStr, InsertBefore);
4120 }
4121
4122 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4123 ArrayRef<BasicBlock *> IndirectDests,
4124 ArrayRef<Value *> Args,
4125 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4126 const Twine &NameStr = "",
4127 Instruction *InsertBefore = nullptr) {
4128 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4129 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4130 }
4131
4132 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4133 ArrayRef<BasicBlock *> IndirectDests,
4134 ArrayRef<Value *> Args, const Twine &NameStr,
4135 BasicBlock *InsertAtEnd) {
4136 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4137 IndirectDests, Args, NameStr, InsertAtEnd);
4138 }
4139
4141 BasicBlock *DefaultDest,
4142 ArrayRef<BasicBlock *> IndirectDests,
4143 ArrayRef<Value *> Args,
4145 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4146 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4147 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4148 }
4149
4150 /// Create a clone of \p CBI with a different set of operand bundles and
4151 /// insert it before \p InsertPt.
4152 ///
4153 /// The returned callbr instruction is identical to \p CBI in every way
4154 /// except that the operand bundles for the new instruction are set to the
4155 /// operand bundles in \p Bundles.
4156 static CallBrInst *Create(CallBrInst *CBI,
4158 Instruction *InsertPt = nullptr);
4159
4160 /// Return the number of callbr indirect dest labels.
4161 ///
4162 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4163
4164 /// getIndirectDestLabel - Return the i-th indirect dest label.
4165 ///
4166 Value *getIndirectDestLabel(unsigned i) const {
4167 assert(i < getNumIndirectDests() && "Out of bounds!");
4168 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4169 }
4170
4171 Value *getIndirectDestLabelUse(unsigned i) const {
4172 assert(i < getNumIndirectDests() && "Out of bounds!");
4173 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4174 }
4175
4176 // Return the destination basic blocks...
4178 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4179 }
4180 BasicBlock *getIndirectDest(unsigned i) const {
4181 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4182 }
4184 SmallVector<BasicBlock *, 16> IndirectDests;
4185 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4186 IndirectDests.push_back(getIndirectDest(i));
4187 return IndirectDests;
4188 }
4190 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4191 }
4192 void setIndirectDest(unsigned i, BasicBlock *B) {
4193 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4194 }
4195
4196 BasicBlock *getSuccessor(unsigned i) const {
4197 assert(i < getNumSuccessors() + 1 &&
4198 "Successor # out of range for callbr!");
4199 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4200 }
4201
4202 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4203 assert(i < getNumIndirectDests() + 1 &&
4204 "Successor # out of range for callbr!");
4205 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4206 }
4207
4208 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4209
4210 // Methods for support type inquiry through isa, cast, and dyn_cast:
4211 static bool classof(const Instruction *I) {
4212 return (I->getOpcode() == Instruction::CallBr);
4213 }
4214 static bool classof(const Value *V) {
4215 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4216 }
4217
4218private:
4219 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4220 // method so that subclasses cannot accidentally use it.
4221 template <typename Bitfield>
4222 void setSubclassData(typename Bitfield::Type Value) {
4223 Instruction::setSubclassData<Bitfield>(Value);
4224 }
4225};
4226
// Inline constructor definition (InsertBefore form). As with InvokeInst, the
// operand array is expected to be co-allocated immediately before the
// instruction object, so op_end(this) - NumOperands is the first operand.
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
4237
// Inline constructor definition (InsertAtEnd form); identical to the
// InsertBefore form except for the insertion point passed to CallBase.
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
4248
4249//===----------------------------------------------------------------------===//
4250// ResumeInst Class
4251//===----------------------------------------------------------------------===//
4252
4253//===---------------------------------------------------------------------------
4254/// Resume the propagation of an exception.
4255///
class ResumeInst : public Instruction {
  ResumeInst(const ResumeInst &RI);

  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ResumeInst *cloneImpl() const;

public:
  // The `new(1)` below allocates space for exactly one operand (the
  // exception value).
  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
    return new(1) ResumeInst(Exn, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor.
  Value *getValue() const { return Op<0>(); }

  // A resume is a terminator with no successors: control leaves the function
  // via exception propagation.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private because ResumeInst has no successors; these exist only to satisfy
  // the terminator successor interface and must never be called.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};
4302
// ResumeInst always has exactly one operand (the exception value).
template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4310//===----------------------------------------------------------------------===//
4311// CatchSwitchInst Class
4312//===----------------------------------------------------------------------===//
  // Bit 0 of the subclass data records whether this catchswitch has an
  // explicit unwind destination operand.
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new switch instruction, specifying a
  /// default destination. The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a
  /// default destination. The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands; the operand list is hung off
  // so handlers can be added after construction.
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for CatchSwitch stmt
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor Methods for CatchSwitch stmt
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  BasicBlock *getUnwindDest() const {
    // Operand 1 is the unwind destination only when one was allocated.
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }
4392 /// return the number of 'handlers' in this catchswitch
4393 /// instruction, except the default handler
4394 unsigned getNumHandlers() const {
4395 if (hasUnwindDest())
4396 return getNumOperands() - 2;
4397 return getNumOperands() - 1;
4398 }
4399
private:
  // Map a handler operand (stored as a Value) to its BasicBlock.
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
  using handler_range = iterator_range<handler_iterator>;
  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
  using const_handler_iterator =
      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
  using const_handler_range = iterator_range<const_handler_iterator>;

  /// Returns an iterator that points to the first handler in CatchSwitchInst.
  handler_iterator handler_begin() {
    // Skip the parent pad (operand 0) and, if present, the unwind dest.
    op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return handler_iterator(It, DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_begin() const {
    const_op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
  }

  /// Returns a read-only iterator that points one past the last
  /// handler in the CatchSwitchInst.
  handler_iterator handler_end() {
    return handler_iterator(op_end(), DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_end() const {
    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
  }

  /// iteration adapter for range-for loops.
  handler_range handlers() {
    return make_range(handler_begin(), handler_end());
  }

  /// iteration adapter for range-for loops.
  const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
  }

  /// Add an entry to the switch instruction...
  /// Note:
  /// This action invalidates handler_end(). Old handler_end() iterator will
  /// point to the added handler.
  void addHandler(BasicBlock *Dest);

  void removeHandler(handler_iterator HI);

  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4463 BasicBlock *getSuccessor(unsigned Idx) const {
4464 assert(Idx < getNumSuccessors() &&
4465