LLVM 19.0.0git
Instructions.h
Go to the documentation of this file.
1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
26#include "llvm/IR/CFG.h"
27#include "llvm/IR/Constant.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Use.h"
34#include "llvm/IR/User.h"
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <iterator>
41#include <optional>
42
43namespace llvm {
44
45class APFloat;
46class APInt;
47class BasicBlock;
48class ConstantInt;
49class DataLayout;
50class StringRef;
51class Type;
52class Value;
53class UnreachableInst;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
61 Type *AllocatedType;
62
63 using AlignmentField = AlignmentBitfieldElementT<0>;
64 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
67 SwiftErrorField>(),
68 "Bitfields must be contiguous");
69
70protected:
71 // Note: Instruction needs to be a friend here to call cloneImpl.
72 friend class Instruction;
73
74 AllocaInst *cloneImpl() const;
75
76public:
77 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78 const Twine &Name, BasicBlock::iterator InsertBefore);
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 BasicBlock::iterator InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
87 Instruction *InsertBefore);
88 AllocaInst(Type *Ty, unsigned AddrSpace,
89 const Twine &Name, BasicBlock *InsertAtEnd);
90
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
93 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
94 const Twine &Name = "", Instruction *InsertBefore = nullptr);
95 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
96 const Twine &Name, BasicBlock *InsertAtEnd);
97
98 /// Return true if there is an allocation size parameter to the allocation
99 /// instruction that is not 1.
100 bool isArrayAllocation() const;
101
102 /// Get the number of elements allocated. For a simple allocation of a single
103 /// element, this will return a constant 1 value.
104 const Value *getArraySize() const { return getOperand(0); }
105 Value *getArraySize() { return getOperand(0); }
106
107 /// Overload to return most specific pointer type.
109 return cast<PointerType>(Instruction::getType());
110 }
111
112 /// Return the address space for the allocation.
113 unsigned getAddressSpace() const {
114 return getType()->getAddressSpace();
115 }
116
117 /// Get allocation size in bytes. Returns std::nullopt if size can't be
118 /// determined, e.g. in case of a VLA.
119 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
120
121 /// Get allocation size in bits. Returns std::nullopt if size can't be
122 /// determined, e.g. in case of a VLA.
123 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
124
125 /// Return the type that is being allocated by the instruction.
126 Type *getAllocatedType() const { return AllocatedType; }
127 /// for use only in special circumstances that need to generically
128 /// transform a whole instruction (eg: IR linking and vectorization).
129 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
130
131 /// Return the alignment of the memory that is being allocated by the
132 /// instruction.
133 Align getAlign() const {
134 return Align(1ULL << getSubclassData<AlignmentField>());
135 }
136
138 setSubclassData<AlignmentField>(Log2(Align));
139 }
140
141 /// Return true if this alloca is in the entry block of the function and is a
142 /// constant size. If so, the code generator will fold it into the
143 /// prolog/epilog code, so it is basically free.
144 bool isStaticAlloca() const;
145
146 /// Return true if this alloca is used as an inalloca argument to a call. Such
147 /// allocas are never considered static even if they are in the entry block.
148 bool isUsedWithInAlloca() const {
149 return getSubclassData<UsedWithInAllocaField>();
150 }
151
152 /// Specify whether this alloca is used to represent the arguments to a call.
153 void setUsedWithInAlloca(bool V) {
154 setSubclassData<UsedWithInAllocaField>(V);
155 }
156
157 /// Return true if this alloca is used as a swifterror argument to a call.
158 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
159 /// Specify whether this alloca is used to represent a swifterror.
160 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
161
162 // Methods for support type inquiry through isa, cast, and dyn_cast:
163 static bool classof(const Instruction *I) {
164 return (I->getOpcode() == Instruction::Alloca);
165 }
166 static bool classof(const Value *V) {
167 return isa<Instruction>(V) && classof(cast<Instruction>(V));
168 }
169
170private:
171 // Shadow Instruction::setInstructionSubclassData with a private forwarding
172 // method so that subclasses cannot accidentally use it.
173 template <typename Bitfield>
174 void setSubclassData(typename Bitfield::Type Value) {
175 Instruction::setSubclassData<Bitfield>(Value);
176 }
177};
178
179//===----------------------------------------------------------------------===//
180// LoadInst Class
181//===----------------------------------------------------------------------===//
182
183/// An instruction for reading from memory. This uses the SubclassData field in
184/// Value to store whether or not the load is volatile.
186 using VolatileField = BoolBitfieldElementT<0>;
189 static_assert(
190 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
191 "Bitfields must be contiguous");
192
193 void AssertOK();
194
195protected:
196 // Note: Instruction needs to be a friend here to call cloneImpl.
197 friend class Instruction;
198
199 LoadInst *cloneImpl() const;
200
201public:
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
203 BasicBlock::iterator InsertBefore);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
205 Instruction *InsertBefore);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
207 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208 BasicBlock::iterator InsertBefore);
209 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
210 Instruction *InsertBefore);
211 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
212 BasicBlock *InsertAtEnd);
213 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
214 Align Align, BasicBlock::iterator InsertBefore);
215 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
216 Align Align, Instruction *InsertBefore = nullptr);
217 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
218 Align Align, BasicBlock *InsertAtEnd);
219 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
221 BasicBlock::iterator InsertBefore);
222 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
225 Instruction *InsertBefore = nullptr);
226 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
228 BasicBlock *InsertAtEnd);
229
230 /// Return true if this is a load from a volatile memory location.
231 bool isVolatile() const { return getSubclassData<VolatileField>(); }
232
233 /// Specify whether this is a volatile load or not.
234 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
235
236 /// Return the alignment of the access that is being performed.
237 Align getAlign() const {
238 return Align(1ULL << (getSubclassData<AlignmentField>()));
239 }
240
242 setSubclassData<AlignmentField>(Log2(Align));
243 }
244
245 /// Returns the ordering constraint of this load instruction.
247 return getSubclassData<OrderingField>();
248 }
249 /// Sets the ordering constraint of this load instruction. May not be Release
250 /// or AcquireRelease.
252 setSubclassData<OrderingField>(Ordering);
253 }
254
255 /// Returns the synchronization scope ID of this load instruction.
257 return SSID;
258 }
259
260 /// Sets the synchronization scope ID of this load instruction.
262 this->SSID = SSID;
263 }
264
265 /// Sets the ordering constraint and the synchronization scope ID of this load
266 /// instruction.
269 setOrdering(Ordering);
270 setSyncScopeID(SSID);
271 }
272
273 bool isSimple() const { return !isAtomic() && !isVolatile(); }
274
275 bool isUnordered() const {
278 !isVolatile();
279 }
280
282 const Value *getPointerOperand() const { return getOperand(0); }
283 static unsigned getPointerOperandIndex() { return 0U; }
285
286 /// Returns the address space of the pointer operand.
287 unsigned getPointerAddressSpace() const {
289 }
290
291 // Methods for support type inquiry through isa, cast, and dyn_cast:
292 static bool classof(const Instruction *I) {
293 return I->getOpcode() == Instruction::Load;
294 }
295 static bool classof(const Value *V) {
296 return isa<Instruction>(V) && classof(cast<Instruction>(V));
297 }
298
299private:
300 // Shadow Instruction::setInstructionSubclassData with a private forwarding
301 // method so that subclasses cannot accidentally use it.
302 template <typename Bitfield>
303 void setSubclassData(typename Bitfield::Type Value) {
304 Instruction::setSubclassData<Bitfield>(Value);
305 }
306
307 /// The synchronization scope ID of this load instruction. Not quite enough
308 /// room in SubClassData for everything, so synchronization scope ID gets its
309 /// own field.
310 SyncScope::ID SSID;
311};
312
313//===----------------------------------------------------------------------===//
314// StoreInst Class
315//===----------------------------------------------------------------------===//
316
317/// An instruction for storing to memory.
318class StoreInst : public Instruction {
319 using VolatileField = BoolBitfieldElementT<0>;
322 static_assert(
323 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
324 "Bitfields must be contiguous");
325
326 void AssertOK();
327
328protected:
329 // Note: Instruction needs to be a friend here to call cloneImpl.
330 friend class Instruction;
331
332 StoreInst *cloneImpl() const;
333
334public:
335 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
336 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
337 StoreInst(Value *Val, Value *Ptr, BasicBlock::iterator InsertBefore);
338 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
339 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
340 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
341 BasicBlock::iterator InsertBefore);
343 Instruction *InsertBefore = nullptr);
345 BasicBlock *InsertAtEnd);
347 BasicBlock::iterator InsertBefore);
350 Instruction *InsertBefore = nullptr);
352 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
354 AtomicOrdering Order, SyncScope::ID SSID,
355 BasicBlock::iterator InsertBefore);
356
357 // allocate space for exactly two operands
358 void *operator new(size_t S) { return User::operator new(S, 2); }
359 void operator delete(void *Ptr) { User::operator delete(Ptr); }
360
361 /// Return true if this is a store to a volatile memory location.
362 bool isVolatile() const { return getSubclassData<VolatileField>(); }
363
364 /// Specify whether this is a volatile store or not.
365 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
366
367 /// Transparently provide more efficient getOperand methods.
369
370 Align getAlign() const {
371 return Align(1ULL << (getSubclassData<AlignmentField>()));
372 }
373
375 setSubclassData<AlignmentField>(Log2(Align));
376 }
377
378 /// Returns the ordering constraint of this store instruction.
380 return getSubclassData<OrderingField>();
381 }
382
383 /// Sets the ordering constraint of this store instruction. May not be
384 /// Acquire or AcquireRelease.
386 setSubclassData<OrderingField>(Ordering);
387 }
388
389 /// Returns the synchronization scope ID of this store instruction.
391 return SSID;
392 }
393
394 /// Sets the synchronization scope ID of this store instruction.
396 this->SSID = SSID;
397 }
398
399 /// Sets the ordering constraint and the synchronization scope ID of this
400 /// store instruction.
403 setOrdering(Ordering);
404 setSyncScopeID(SSID);
405 }
406
407 bool isSimple() const { return !isAtomic() && !isVolatile(); }
408
409 bool isUnordered() const {
412 !isVolatile();
413 }
414
416 const Value *getValueOperand() const { return getOperand(0); }
417
419 const Value *getPointerOperand() const { return getOperand(1); }
420 static unsigned getPointerOperandIndex() { return 1U; }
422
423 /// Returns the address space of the pointer operand.
424 unsigned getPointerAddressSpace() const {
426 }
427
428 // Methods for support type inquiry through isa, cast, and dyn_cast:
429 static bool classof(const Instruction *I) {
430 return I->getOpcode() == Instruction::Store;
431 }
432 static bool classof(const Value *V) {
433 return isa<Instruction>(V) && classof(cast<Instruction>(V));
434 }
435
436private:
437 // Shadow Instruction::setInstructionSubclassData with a private forwarding
438 // method so that subclasses cannot accidentally use it.
439 template <typename Bitfield>
440 void setSubclassData(typename Bitfield::Type Value) {
441 Instruction::setSubclassData<Bitfield>(Value);
442 }
443
444 /// The synchronization scope ID of this store instruction. Not quite enough
445 /// room in SubClassData for everything, so synchronization scope ID gets its
446 /// own field.
447 SyncScope::ID SSID;
448};
449
450template <>
451struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
452};
453
455
456//===----------------------------------------------------------------------===//
457// FenceInst Class
458//===----------------------------------------------------------------------===//
459
460/// An instruction for ordering other memory operations.
461class FenceInst : public Instruction {
462 using OrderingField = AtomicOrderingBitfieldElementT<0>;
463
464 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
465
466protected:
467 // Note: Instruction needs to be a friend here to call cloneImpl.
468 friend class Instruction;
469
470 FenceInst *cloneImpl() const;
471
472public:
473 // Ordering may only be Acquire, Release, AcquireRelease, or
474 // SequentiallyConsistent.
476 BasicBlock::iterator InsertBefore);
479 Instruction *InsertBefore = nullptr);
481 BasicBlock *InsertAtEnd);
482
483 // allocate space for exactly zero operands
484 void *operator new(size_t S) { return User::operator new(S, 0); }
485 void operator delete(void *Ptr) { User::operator delete(Ptr); }
486
487 /// Returns the ordering constraint of this fence instruction.
489 return getSubclassData<OrderingField>();
490 }
491
492 /// Sets the ordering constraint of this fence instruction. May only be
493 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
495 setSubclassData<OrderingField>(Ordering);
496 }
497
498 /// Returns the synchronization scope ID of this fence instruction.
500 return SSID;
501 }
502
503 /// Sets the synchronization scope ID of this fence instruction.
505 this->SSID = SSID;
506 }
507
508 // Methods for support type inquiry through isa, cast, and dyn_cast:
509 static bool classof(const Instruction *I) {
510 return I->getOpcode() == Instruction::Fence;
511 }
512 static bool classof(const Value *V) {
513 return isa<Instruction>(V) && classof(cast<Instruction>(V));
514 }
515
516private:
517 // Shadow Instruction::setInstructionSubclassData with a private forwarding
518 // method so that subclasses cannot accidentally use it.
519 template <typename Bitfield>
520 void setSubclassData(typename Bitfield::Type Value) {
521 Instruction::setSubclassData<Bitfield>(Value);
522 }
523
524 /// The synchronization scope ID of this fence instruction. Not quite enough
525 /// room in SubClassData for everything, so synchronization scope ID gets its
526 /// own field.
527 SyncScope::ID SSID;
528};
529
530//===----------------------------------------------------------------------===//
531// AtomicCmpXchgInst Class
532//===----------------------------------------------------------------------===//
533
534/// An instruction that atomically checks whether a
535/// specified value is in a memory location, and, if it is, stores a new value
536/// there. The value returned by this instruction is a pair containing the
537/// original value as first element, and an i1 indicating success (true) or
538/// failure (false) as second element.
539///
541 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
542 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
543 SyncScope::ID SSID);
544
545 template <unsigned Offset>
546 using AtomicOrderingBitfieldElement =
549
550protected:
551 // Note: Instruction needs to be a friend here to call cloneImpl.
552 friend class Instruction;
553
555
556public:
557 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
558 AtomicOrdering SuccessOrdering,
559 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
560 BasicBlock::iterator InsertBefore);
561 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
562 AtomicOrdering SuccessOrdering,
563 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
564 Instruction *InsertBefore = nullptr);
565 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
566 AtomicOrdering SuccessOrdering,
567 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
568 BasicBlock *InsertAtEnd);
569
570 // allocate space for exactly three operands
571 void *operator new(size_t S) { return User::operator new(S, 3); }
572 void operator delete(void *Ptr) { User::operator delete(Ptr); }
573
582 static_assert(
585 "Bitfields must be contiguous");
586
587 /// Return the alignment of the memory that is being allocated by the
588 /// instruction.
589 Align getAlign() const {
590 return Align(1ULL << getSubclassData<AlignmentField>());
591 }
592
594 setSubclassData<AlignmentField>(Log2(Align));
595 }
596
597 /// Return true if this is a cmpxchg from a volatile memory
598 /// location.
599 ///
600 bool isVolatile() const { return getSubclassData<VolatileField>(); }
601
602 /// Specify whether this is a volatile cmpxchg.
603 ///
604 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
605
606 /// Return true if this cmpxchg may spuriously fail.
607 bool isWeak() const { return getSubclassData<WeakField>(); }
608
609 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
610
611 /// Transparently provide more efficient getOperand methods.
613
615 return Ordering != AtomicOrdering::NotAtomic &&
616 Ordering != AtomicOrdering::Unordered;
617 }
618
620 return Ordering != AtomicOrdering::NotAtomic &&
621 Ordering != AtomicOrdering::Unordered &&
622 Ordering != AtomicOrdering::AcquireRelease &&
623 Ordering != AtomicOrdering::Release;
624 }
625
626 /// Returns the success ordering constraint of this cmpxchg instruction.
628 return getSubclassData<SuccessOrderingField>();
629 }
630
631 /// Sets the success ordering constraint of this cmpxchg instruction.
633 assert(isValidSuccessOrdering(Ordering) &&
634 "invalid CmpXchg success ordering");
635 setSubclassData<SuccessOrderingField>(Ordering);
636 }
637
638 /// Returns the failure ordering constraint of this cmpxchg instruction.
640 return getSubclassData<FailureOrderingField>();
641 }
642
643 /// Sets the failure ordering constraint of this cmpxchg instruction.
645 assert(isValidFailureOrdering(Ordering) &&
646 "invalid CmpXchg failure ordering");
647 setSubclassData<FailureOrderingField>(Ordering);
648 }
649
650 /// Returns a single ordering which is at least as strong as both the
651 /// success and failure orderings for this cmpxchg.
660 }
661 return getSuccessOrdering();
662 }
663
664 /// Returns the synchronization scope ID of this cmpxchg instruction.
666 return SSID;
667 }
668
669 /// Sets the synchronization scope ID of this cmpxchg instruction.
671 this->SSID = SSID;
672 }
673
675 const Value *getPointerOperand() const { return getOperand(0); }
676 static unsigned getPointerOperandIndex() { return 0U; }
677
679 const Value *getCompareOperand() const { return getOperand(1); }
680
682 const Value *getNewValOperand() const { return getOperand(2); }
683
684 /// Returns the address space of the pointer operand.
685 unsigned getPointerAddressSpace() const {
687 }
688
689 /// Returns the strongest permitted ordering on failure, given the
690 /// desired ordering on success.
691 ///
692 /// If the comparison in a cmpxchg operation fails, there is no atomic store
693 /// so release semantics cannot be provided. So this function drops explicit
694 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
695 /// operation would remain SequentiallyConsistent.
696 static AtomicOrdering
698 switch (SuccessOrdering) {
699 default:
700 llvm_unreachable("invalid cmpxchg success ordering");
709 }
710 }
711
712 // Methods for support type inquiry through isa, cast, and dyn_cast:
713 static bool classof(const Instruction *I) {
714 return I->getOpcode() == Instruction::AtomicCmpXchg;
715 }
716 static bool classof(const Value *V) {
717 return isa<Instruction>(V) && classof(cast<Instruction>(V));
718 }
719
720private:
721 // Shadow Instruction::setInstructionSubclassData with a private forwarding
722 // method so that subclasses cannot accidentally use it.
723 template <typename Bitfield>
724 void setSubclassData(typename Bitfield::Type Value) {
725 Instruction::setSubclassData<Bitfield>(Value);
726 }
727
728 /// The synchronization scope ID of this cmpxchg instruction. Not quite
729 /// enough room in SubClassData for everything, so synchronization scope ID
730 /// gets its own field.
731 SyncScope::ID SSID;
732};
733
734template <>
736 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
737};
738
740
741//===----------------------------------------------------------------------===//
742// AtomicRMWInst Class
743//===----------------------------------------------------------------------===//
744
745/// an instruction that atomically reads a memory location,
746/// combines it with another value, and then stores the result back. Returns
747/// the old value.
748///
750protected:
751 // Note: Instruction needs to be a friend here to call cloneImpl.
752 friend class Instruction;
753
754 AtomicRMWInst *cloneImpl() const;
755
756public:
757 /// This enumeration lists the possible modifications atomicrmw can make. In
758 /// the descriptions, 'p' is the pointer to the instruction's memory location,
759 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
760 /// instruction. These instructions always return 'old'.
761 enum BinOp : unsigned {
762 /// *p = v
764 /// *p = old + v
766 /// *p = old - v
768 /// *p = old & v
770 /// *p = ~(old & v)
772 /// *p = old | v
774 /// *p = old ^ v
776 /// *p = old >signed v ? old : v
778 /// *p = old <signed v ? old : v
780 /// *p = old >unsigned v ? old : v
782 /// *p = old <unsigned v ? old : v
784
785 /// *p = old + v
787
788 /// *p = old - v
790
791 /// *p = maxnum(old, v)
792 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
794
795 /// *p = minnum(old, v)
796 /// \p minnum matches the behavior of \p llvm.minnum.*.
798
799 /// Increment one up to a maximum value.
800 /// *p = (old u>= v) ? 0 : (old + 1)
802
803 /// Decrement one until a minimum value or zero.
804 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
806
807 FIRST_BINOP = Xchg,
808 LAST_BINOP = UDecWrap,
809 BAD_BINOP
810 };
811
812private:
813 template <unsigned Offset>
814 using AtomicOrderingBitfieldElement =
817
818 template <unsigned Offset>
819 using BinOpBitfieldElement =
821
822public:
823 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
824 AtomicOrdering Ordering, SyncScope::ID SSID,
825 BasicBlock::iterator InsertBefore);
826 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
827 AtomicOrdering Ordering, SyncScope::ID SSID,
828 Instruction *InsertBefore = nullptr);
829 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
830 AtomicOrdering Ordering, SyncScope::ID SSID,
831 BasicBlock *InsertAtEnd);
832
833 // allocate space for exactly two operands
834 void *operator new(size_t S) { return User::operator new(S, 2); }
835 void operator delete(void *Ptr) { User::operator delete(Ptr); }
836
840 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
844 "Bitfields must be contiguous");
845
846 BinOp getOperation() const { return getSubclassData<OperationField>(); }
847
848 static StringRef getOperationName(BinOp Op);
849
850 static bool isFPOperation(BinOp Op) {
851 switch (Op) {
856 return true;
857 default:
858 return false;
859 }
860 }
861
863 setSubclassData<OperationField>(Operation);
864 }
865
866 /// Return the alignment of the memory that is being allocated by the
867 /// instruction.
868 Align getAlign() const {
869 return Align(1ULL << getSubclassData<AlignmentField>());
870 }
871
873 setSubclassData<AlignmentField>(Log2(Align));
874 }
875
876 /// Return true if this is a RMW on a volatile memory location.
877 ///
878 bool isVolatile() const { return getSubclassData<VolatileField>(); }
879
880 /// Specify whether this is a volatile RMW or not.
881 ///
882 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
883
884 /// Transparently provide more efficient getOperand methods.
886
887 /// Returns the ordering constraint of this rmw instruction.
889 return getSubclassData<AtomicOrderingField>();
890 }
891
892 /// Sets the ordering constraint of this rmw instruction.
894 assert(Ordering != AtomicOrdering::NotAtomic &&
895 "atomicrmw instructions can only be atomic.");
896 assert(Ordering != AtomicOrdering::Unordered &&
897 "atomicrmw instructions cannot be unordered.");
898 setSubclassData<AtomicOrderingField>(Ordering);
899 }
900
901 /// Returns the synchronization scope ID of this rmw instruction.
903 return SSID;
904 }
905
906 /// Sets the synchronization scope ID of this rmw instruction.
908 this->SSID = SSID;
909 }
910
911 Value *getPointerOperand() { return getOperand(0); }
912 const Value *getPointerOperand() const { return getOperand(0); }
913 static unsigned getPointerOperandIndex() { return 0U; }
914
915 Value *getValOperand() { return getOperand(1); }
916 const Value *getValOperand() const { return getOperand(1); }
917
918 /// Returns the address space of the pointer operand.
919 unsigned getPointerAddressSpace() const {
921 }
922
924 return isFPOperation(getOperation());
925 }
926
927 // Methods for support type inquiry through isa, cast, and dyn_cast:
928 static bool classof(const Instruction *I) {
929 return I->getOpcode() == Instruction::AtomicRMW;
930 }
931 static bool classof(const Value *V) {
932 return isa<Instruction>(V) && classof(cast<Instruction>(V));
933 }
934
935private:
936 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
937 AtomicOrdering Ordering, SyncScope::ID SSID);
938
939 // Shadow Instruction::setInstructionSubclassData with a private forwarding
940 // method so that subclasses cannot accidentally use it.
941 template <typename Bitfield>
942 void setSubclassData(typename Bitfield::Type Value) {
943 Instruction::setSubclassData<Bitfield>(Value);
944 }
945
946 /// The synchronization scope ID of this rmw instruction. Not quite enough
947 /// room in SubClassData for everything, so synchronization scope ID gets its
948 /// own field.
949 SyncScope::ID SSID;
950};
951
952template <>
954 : public FixedNumOperandTraits<AtomicRMWInst,2> {
955};
956
958
959//===----------------------------------------------------------------------===//
960// GetElementPtrInst Class
961//===----------------------------------------------------------------------===//
962
963// checkGEPType - Simple wrapper function to give a better assertion failure
964// message on bad indexes for a gep instruction.
965//
967 assert(Ty && "Invalid GetElementPtrInst indices for type!");
968 return Ty;
969}
970
971/// an instruction for type-safe pointer arithmetic to
972/// access elements of arrays and structs
973///
975 Type *SourceElementType;
976 Type *ResultElementType;
977
979
980 /// Constructors - Create a getelementptr instruction with a base pointer an
981 /// list of indices. The first and second ctor can optionally insert before an
982 /// existing instruction, the third appends the new instruction to the
983 /// specified BasicBlock.
984 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
985 ArrayRef<Value *> IdxList, unsigned Values,
986 const Twine &NameStr,
987 BasicBlock::iterator InsertBefore);
988 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
989 ArrayRef<Value *> IdxList, unsigned Values,
990 const Twine &NameStr, Instruction *InsertBefore);
991 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
992 ArrayRef<Value *> IdxList, unsigned Values,
993 const Twine &NameStr, BasicBlock *InsertAtEnd);
994
995 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
996
997protected:
998 // Note: Instruction needs to be a friend here to call cloneImpl.
999 friend class Instruction;
1000
1002
1003public:
1004 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1005 ArrayRef<Value *> IdxList,
1006 const Twine &NameStr,
1007 BasicBlock::iterator InsertBefore) {
1008 unsigned Values = 1 + unsigned(IdxList.size());
1009 assert(PointeeType && "Must specify element type");
1010 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
1011 NameStr, InsertBefore);
1012 }
1013
1014 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1015 ArrayRef<Value *> IdxList,
1016 const Twine &NameStr = "",
1017 Instruction *InsertBefore = nullptr) {
1018 unsigned Values = 1 + unsigned(IdxList.size());
1019 assert(PointeeType && "Must specify element type");
1020 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
1021 NameStr, InsertBefore);
1022 }
1023
1024 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1025 ArrayRef<Value *> IdxList,
1026 const Twine &NameStr,
1027 BasicBlock *InsertAtEnd) {
1028 unsigned Values = 1 + unsigned(IdxList.size());
1029 assert(PointeeType && "Must specify element type");
1030 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
1031 NameStr, InsertAtEnd);
1032 }
1033
1034 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1036 const Twine &NameStr,
1037 BasicBlock::iterator InsertBefore) {
1039 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1040 GEP->setNoWrapFlags(NW);
1041 return GEP;
1042 }
1043
1044 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1046 const Twine &NameStr = "",
1047 Instruction *InsertBefore = nullptr) {
1049 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1050 GEP->setNoWrapFlags(NW);
1051 return GEP;
1052 }
1053
1054 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1056 const Twine &NameStr,
1057 BasicBlock *InsertAtEnd) {
1059 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1060 GEP->setNoWrapFlags(NW);
1061 return GEP;
1062 }
1063
1064 /// Create an "inbounds" getelementptr. See the documentation for the
1065 /// "inbounds" flag in LangRef.html for details.
1067 ArrayRef<Value *> IdxList,
1068 const Twine &NameStr,
1069 BasicBlock::iterator InsertBefore) {
1070 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
1071 NameStr, InsertBefore);
1072 }
1073
1074 static GetElementPtrInst *
1076 const Twine &NameStr = "",
1077 Instruction *InsertBefore = nullptr) {
1078 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
1079 NameStr, InsertBefore);
1080 }
1081
1083 ArrayRef<Value *> IdxList,
1084 const Twine &NameStr,
1085 BasicBlock *InsertAtEnd) {
1086 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
1087 NameStr, InsertAtEnd);
1088 }
1089
1090 /// Transparently provide more efficient getOperand methods.
1092
1093 Type *getSourceElementType() const { return SourceElementType; }
1094
1095 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1096 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1097
1099 return ResultElementType;
1100 }
1101
1102 /// Returns the address space of this instruction's pointer type.
1103 unsigned getAddressSpace() const {
1104 // Note that this is always the same as the pointer operand's address space
1105 // and that is cheaper to compute, so cheat here.
1106 return getPointerAddressSpace();
1107 }
1108
1109 /// Returns the result type of a getelementptr with the given source
1110 /// element type and indexes.
1111 ///
1112 /// Null is returned if the indices are invalid for the specified
1113 /// source element type.
1114 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1115 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1116 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1117
1118 /// Return the type of the element at the given index of an indexable
1119 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1120 ///
1121 /// Returns null if the type can't be indexed, or the given index is not
1122 /// legal for the given type.
1123 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1124 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1125
1126 inline op_iterator idx_begin() { return op_begin()+1; }
1127 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1128 inline op_iterator idx_end() { return op_end(); }
1129 inline const_op_iterator idx_end() const { return op_end(); }
1130
1132 return make_range(idx_begin(), idx_end());
1133 }
1134
1136 return make_range(idx_begin(), idx_end());
1137 }
1138
1140 return getOperand(0);
1141 }
1142 const Value *getPointerOperand() const {
1143 return getOperand(0);
1144 }
1145 static unsigned getPointerOperandIndex() {
1146 return 0U; // get index for modifying correct operand.
1147 }
1148
1149 /// Method to return the pointer operand as a
1150 /// PointerType.
1152 return getPointerOperand()->getType();
1153 }
1154
1155 /// Returns the address space of the pointer operand.
1156 unsigned getPointerAddressSpace() const {
1158 }
1159
1160 /// Returns the pointer type returned by the GEP
1161 /// instruction, which may be a vector of pointers.
1163 // Vector GEP
1164 Type *Ty = Ptr->getType();
1165 if (Ty->isVectorTy())
1166 return Ty;
1167
1168 for (Value *Index : IdxList)
1169 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1170 ElementCount EltCount = IndexVTy->getElementCount();
1171 return VectorType::get(Ty, EltCount);
1172 }
1173 // Scalar GEP
1174 return Ty;
1175 }
1176
1177 unsigned getNumIndices() const { // Note: always non-negative
1178 return getNumOperands() - 1;
1179 }
1180
1181 bool hasIndices() const {
1182 return getNumOperands() > 1;
1183 }
1184
1185 /// Return true if all of the indices of this GEP are
1186 /// zeros. If so, the result pointer and the first operand have the same
1187 /// value, just potentially different types.
1188 bool hasAllZeroIndices() const;
1189
1190 /// Return true if all of the indices of this GEP are
1191 /// constant integers. If so, the result pointer and the first operand have
1192 /// a constant offset between them.
1193 bool hasAllConstantIndices() const;
1194
1195 /// Set nowrap flags for GEP instruction.
1197
1198 /// Set or clear the inbounds flag on this GEP instruction.
1199 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1200 /// TODO: Remove this method in favor of setNoWrapFlags().
1201 void setIsInBounds(bool b = true);
1202
1203 /// Get the nowrap flags for the GEP instruction.
1205
1206 /// Determine whether the GEP has the inbounds flag.
1207 bool isInBounds() const;
1208
1209 /// Determine whether the GEP has the nusw flag.
1210 bool hasNoUnsignedSignedWrap() const;
1211
1212 /// Determine whether the GEP has the nuw flag.
1213 bool hasNoUnsignedWrap() const;
1214
1215 /// Accumulate the constant address offset of this GEP if possible.
1216 ///
1217 /// This routine accepts an APInt into which it will accumulate the constant
1218 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1219 /// all-constant, it returns false and the value of the offset APInt is
1220 /// undefined (it is *not* preserved!). The APInt passed into this routine
1221 /// must be at least as wide as the IntPtr type for the address space of
1222 /// the base GEP pointer.
1223 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1224 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1225 MapVector<Value *, APInt> &VariableOffsets,
1226 APInt &ConstantOffset) const;
1227 // Methods for support type inquiry through isa, cast, and dyn_cast:
1228 static bool classof(const Instruction *I) {
1229 return (I->getOpcode() == Instruction::GetElementPtr);
1230 }
1231 static bool classof(const Value *V) {
1232 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1233 }
1234};
1235
1236template <>
1238 public VariadicOperandTraits<GetElementPtrInst, 1> {
1239};
1240
1241GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1242 ArrayRef<Value *> IdxList, unsigned Values,
1243 const Twine &NameStr,
1244 BasicBlock::iterator InsertBefore)
1245 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
1246 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1247 Values, InsertBefore),
1248 SourceElementType(PointeeType),
1249 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1250 init(Ptr, IdxList, NameStr);
1251}
1252
1253GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1254 ArrayRef<Value *> IdxList, unsigned Values,
1255 const Twine &NameStr,
1256 Instruction *InsertBefore)
1257 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
1258 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1259 Values, InsertBefore),
1260 SourceElementType(PointeeType),
1261 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1262 init(Ptr, IdxList, NameStr);
1263}
1264
1265GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1266 ArrayRef<Value *> IdxList, unsigned Values,
1267 const Twine &NameStr,
1268 BasicBlock *InsertAtEnd)
1269 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
1270 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1271 Values, InsertAtEnd),
1272 SourceElementType(PointeeType),
1273 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1274 init(Ptr, IdxList, NameStr);
1275}
1276
1277DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1278
1279//===----------------------------------------------------------------------===//
1280// ICmpInst Class
1281//===----------------------------------------------------------------------===//
1282
1283/// This instruction compares its operands according to the predicate given
1284/// to the constructor. It only operates on integers or pointers. The operands
1285/// must be identical types.
1286/// Represent an integer comparison operator.
1287class ICmpInst: public CmpInst {
1288 void AssertOK() {
1289 assert(isIntPredicate() &&
1290 "Invalid ICmp predicate value");
1291 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1292 "Both operands to ICmp instruction are not of the same type!");
1293 // Check that the operands are the right type
1294 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1295 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1296 "Invalid operand types for ICmp instruction");
1297 }
1298
1299protected:
1300 // Note: Instruction needs to be a friend here to call cloneImpl.
1301 friend class Instruction;
1302
1303 /// Clone an identical ICmpInst
1304 ICmpInst *cloneImpl() const;
1305
1306public:
1307 /// Constructor with insert-before-instruction semantics.
1309 BasicBlock::iterator InsertBefore, ///< Where to insert
1310 Predicate pred, ///< The predicate to use for the comparison
1311 Value *LHS, ///< The left-hand-side of the expression
1312 Value *RHS, ///< The right-hand-side of the expression
1313 const Twine &NameStr = "" ///< Name of the instruction
1314 ) : CmpInst(makeCmpResultType(LHS->getType()),
1315 Instruction::ICmp, pred, LHS, RHS, NameStr,
1316 InsertBefore) {
1317#ifndef NDEBUG
1318 AssertOK();
1319#endif
1320 }
1321
1322 /// Constructor with insert-before-instruction semantics.
1324 Instruction *InsertBefore, ///< Where to insert
1325 Predicate pred, ///< The predicate to use for the comparison
1326 Value *LHS, ///< The left-hand-side of the expression
1327 Value *RHS, ///< The right-hand-side of the expression
1328 const Twine &NameStr = "" ///< Name of the instruction
1329 ) : CmpInst(makeCmpResultType(LHS->getType()),
1330 Instruction::ICmp, pred, LHS, RHS, NameStr,
1331 InsertBefore) {
1332#ifndef NDEBUG
1333 AssertOK();
1334#endif
1335 }
1336
1337 /// Constructor with insert-at-end semantics.
1339 BasicBlock *InsertAtEnd, ///< Block to insert into.
1340 Predicate pred, ///< The predicate to use for the comparison
1341 Value *LHS, ///< The left-hand-side of the expression
1342 Value *RHS, ///< The right-hand-side of the expression
1343 const Twine &NameStr = "" ///< Name of the instruction
1344 ) : CmpInst(makeCmpResultType(LHS->getType()),
1345 Instruction::ICmp, pred, LHS, RHS, NameStr,
1346 InsertAtEnd) {
1347#ifndef NDEBUG
1348 AssertOK();
1349#endif
1350 }
1351
1352 /// Constructor with no-insertion semantics
1354 Predicate pred, ///< The predicate to use for the comparison
1355 Value *LHS, ///< The left-hand-side of the expression
1356 Value *RHS, ///< The right-hand-side of the expression
1357 const Twine &NameStr = "" ///< Name of the instruction
1358 ) : CmpInst(makeCmpResultType(LHS->getType()),
1359 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1360#ifndef NDEBUG
1361 AssertOK();
1362#endif
1363 }
1364
1365 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1366 /// @returns the predicate that would be the result if the operand were
1367 /// regarded as signed.
1368 /// Return the signed version of the predicate
1370 return getSignedPredicate(getPredicate());
1371 }
1372
1373 /// This is a static version that you can use without an instruction.
1374 /// Return the signed version of the predicate.
1375 static Predicate getSignedPredicate(Predicate pred);
1376
1377 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1378 /// @returns the predicate that would be the result if the operand were
1379 /// regarded as unsigned.
1380 /// Return the unsigned version of the predicate
1382 return getUnsignedPredicate(getPredicate());
1383 }
1384
1385 /// This is a static version that you can use without an instruction.
1386 /// Return the unsigned version of the predicate.
1387 static Predicate getUnsignedPredicate(Predicate pred);
1388
1389 /// Return true if this predicate is either EQ or NE. This also
1390 /// tests for commutativity.
1391 static bool isEquality(Predicate P) {
1392 return P == ICMP_EQ || P == ICMP_NE;
1393 }
1394
1395 /// Return true if this predicate is either EQ or NE. This also
1396 /// tests for commutativity.
1397 bool isEquality() const {
1398 return isEquality(getPredicate());
1399 }
1400
1401 /// @returns true if the predicate of this ICmpInst is commutative
1402 /// Determine if this relation is commutative.
1403 bool isCommutative() const { return isEquality(); }
1404
1405 /// Return true if the predicate is relational (not EQ or NE).
1406 ///
1407 bool isRelational() const {
1408 return !isEquality();
1409 }
1410
1411 /// Return true if the predicate is relational (not EQ or NE).
1412 ///
1413 static bool isRelational(Predicate P) {
1414 return !isEquality(P);
1415 }
1416
1417 /// Return true if the predicate is SGT or UGT.
1418 ///
1419 static bool isGT(Predicate P) {
1420 return P == ICMP_SGT || P == ICMP_UGT;
1421 }
1422
1423 /// Return true if the predicate is SLT or ULT.
1424 ///
1425 static bool isLT(Predicate P) {
1426 return P == ICMP_SLT || P == ICMP_ULT;
1427 }
1428
1429 /// Return true if the predicate is SGE or UGE.
1430 ///
1431 static bool isGE(Predicate P) {
1432 return P == ICMP_SGE || P == ICMP_UGE;
1433 }
1434
1435 /// Return true if the predicate is SLE or ULE.
1436 ///
1437 static bool isLE(Predicate P) {
1438 return P == ICMP_SLE || P == ICMP_ULE;
1439 }
1440
1441 /// Returns the sequence of all ICmp predicates.
1442 ///
1443 static auto predicates() { return ICmpPredicates(); }
1444
1445 /// Exchange the two operands to this instruction in such a way that it does
1446 /// not modify the semantics of the instruction. The predicate value may be
1447 /// changed to retain the same result if the predicate is order dependent
1448 /// (e.g. ult).
1449 /// Swap operands and adjust predicate.
1451 setPredicate(getSwappedPredicate());
1452 Op<0>().swap(Op<1>());
1453 }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APInt &LHS, const APInt &RHS,
1457 ICmpInst::Predicate Pred);
1458
1459 // Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::ICmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
1467
1468//===----------------------------------------------------------------------===//
1469// FCmpInst Class
1470//===----------------------------------------------------------------------===//
1471
1472/// This instruction compares its operands according to the predicate given
1473/// to the constructor. It only operates on floating point values or packed
1474/// vectors of floating point values. The operands must be identical types.
1475/// Represents a floating point comparison operator.
1476class FCmpInst: public CmpInst {
1477 void AssertOK() {
1478 assert(isFPPredicate() && "Invalid FCmp predicate value");
1479 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1480 "Both operands to FCmp instruction are not of the same type!");
1481 // Check that the operands are the right type
1482 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1483 "Invalid operand types for FCmp instruction");
1484 }
1485
1486protected:
1487 // Note: Instruction needs to be a friend here to call cloneImpl.
1488 friend class Instruction;
1489
1490 /// Clone an identical FCmpInst
1491 FCmpInst *cloneImpl() const;
1492
1493public:
1494 /// Constructor with insert-before-instruction semantics.
1496 BasicBlock::iterator InsertBefore, ///< Where to insert
1497 Predicate pred, ///< The predicate to use for the comparison
1498 Value *LHS, ///< The left-hand-side of the expression
1499 Value *RHS, ///< The right-hand-side of the expression
1500 const Twine &NameStr = "" ///< Name of the instruction
1502 Instruction::FCmp, pred, LHS, RHS, NameStr,
1503 InsertBefore) {
1504 AssertOK();
1505 }
1506
1507 /// Constructor with insert-before-instruction semantics.
1509 Instruction *InsertBefore, ///< Where to insert
1510 Predicate pred, ///< The predicate to use for the comparison
1511 Value *LHS, ///< The left-hand-side of the expression
1512 Value *RHS, ///< The right-hand-side of the expression
1513 const Twine &NameStr = "" ///< Name of the instruction
1515 Instruction::FCmp, pred, LHS, RHS, NameStr,
1516 InsertBefore) {
1517 AssertOK();
1518 }
1519
1520 /// Constructor with insert-at-end semantics.
1522 BasicBlock *InsertAtEnd, ///< Block to insert into.
1523 Predicate pred, ///< The predicate to use for the comparison
1524 Value *LHS, ///< The left-hand-side of the expression
1525 Value *RHS, ///< The right-hand-side of the expression
1526 const Twine &NameStr = "" ///< Name of the instruction
1528 Instruction::FCmp, pred, LHS, RHS, NameStr,
1529 InsertAtEnd) {
1530 AssertOK();
1531 }
1532
1533 /// Constructor with no-insertion semantics
1535 Predicate Pred, ///< The predicate to use for the comparison
1536 Value *LHS, ///< The left-hand-side of the expression
1537 Value *RHS, ///< The right-hand-side of the expression
1538 const Twine &NameStr = "", ///< Name of the instruction
1539 Instruction *FlagsSource = nullptr
1540 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1541 RHS, NameStr, nullptr, FlagsSource) {
1542 AssertOK();
1543 }
1544
1545 /// @returns true if the predicate of this instruction is EQ or NE.
1546 /// Determine if this is an equality predicate.
1547 static bool isEquality(Predicate Pred) {
1548 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1549 Pred == FCMP_UNE;
1550 }
1551
1552 /// @returns true if the predicate of this instruction is EQ or NE.
1553 /// Determine if this is an equality predicate.
1554 bool isEquality() const { return isEquality(getPredicate()); }
1555
1556 /// @returns true if the predicate of this instruction is commutative.
1557 /// Determine if this is a commutative predicate.
1558 bool isCommutative() const {
1559 return isEquality() ||
1560 getPredicate() == FCMP_FALSE ||
1561 getPredicate() == FCMP_TRUE ||
1562 getPredicate() == FCMP_ORD ||
1564 }
1565
1566 /// @returns true if the predicate is relational (not EQ or NE).
1567 /// Determine if this a relational predicate.
1568 bool isRelational() const { return !isEquality(); }
1569
1570 /// Exchange the two operands to this instruction in such a way that it does
1571 /// not modify the semantics of the instruction. The predicate value may be
1572 /// changed to retain the same result if the predicate is order dependent
1573 /// (e.g. ult).
1574 /// Swap operands and adjust predicate.
1577 Op<0>().swap(Op<1>());
1578 }
1579
1580 /// Returns the sequence of all FCmp predicates.
1581 ///
1582 static auto predicates() { return FCmpPredicates(); }
1583
1584 /// Return result of `LHS Pred RHS` comparison.
1585 static bool compare(const APFloat &LHS, const APFloat &RHS,
1586 FCmpInst::Predicate Pred);
1587
1588 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1589 static bool classof(const Instruction *I) {
1590 return I->getOpcode() == Instruction::FCmp;
1591 }
1592 static bool classof(const Value *V) {
1593 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1594 }
1595};
1596
1597//===----------------------------------------------------------------------===//
1598/// This class represents a function call, abstracting a target
1599/// machine's calling convention. This class uses low bit of the SubClassData
1600/// field to indicate whether or not this is a tail call. The rest of the bits
1601/// hold the calling convention of the call.
1602///
1603class CallInst : public CallBase {
1604 CallInst(const CallInst &CI);
1605
1606 /// Construct a CallInst from a range of arguments
1607 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1608 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1609 BasicBlock::iterator InsertBefore);
1610
1611 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1612 const Twine &NameStr, BasicBlock::iterator InsertBefore)
1613 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1614
1615 /// Construct a CallInst given a range of arguments.
1616 /// Construct a CallInst from a range of arguments
1617 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1618 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1619 Instruction *InsertBefore);
1620
1621 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1622 const Twine &NameStr, Instruction *InsertBefore)
1623 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1624
1625 /// Construct a CallInst given a range of arguments.
1626 /// Construct a CallInst from a range of arguments
1627 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1628 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1629 BasicBlock *InsertAtEnd);
1630
1631 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1632 BasicBlock::iterator InsertBefore);
1633
1634 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1635 Instruction *InsertBefore);
1636
1637 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1638 BasicBlock *InsertAtEnd);
1639
1640 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1641 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1642 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1643
1644 /// Compute the number of operands to allocate.
1645 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1646 // We need one operand for the called function, plus the input operand
1647 // counts provided.
1648 return 1 + NumArgs + NumBundleInputs;
1649 }
1650
1651protected:
1652 // Note: Instruction needs to be a friend here to call cloneImpl.
1653 friend class Instruction;
1654
1655 CallInst *cloneImpl() const;
1656
1657public:
1658 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1659 BasicBlock::iterator InsertBefore) {
1660 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1661 }
1662
1663 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1664 Instruction *InsertBefore = nullptr) {
1665 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1666 }
1667
1669 const Twine &NameStr,
1670 BasicBlock::iterator InsertBefore) {
1671 return new (ComputeNumOperands(Args.size()))
1672 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1673 }
1674
1676 const Twine &NameStr,
1677 Instruction *InsertBefore = nullptr) {
1678 return new (ComputeNumOperands(Args.size()))
1679 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1680 }
1681
1684 const Twine &NameStr,
1685 BasicBlock::iterator InsertBefore) {
1686 const int NumOperands =
1687 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1688 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1689
1690 return new (NumOperands, DescriptorBytes)
1691 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1692 }
1693
1695 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1696 const Twine &NameStr = "",
1697 Instruction *InsertBefore = nullptr) {
1698 const int NumOperands =
1699 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1700 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1701
1702 return new (NumOperands, DescriptorBytes)
1703 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1704 }
1705
1706 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1707 BasicBlock *InsertAtEnd) {
1708 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1709 }
1710
1712 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1713 return new (ComputeNumOperands(Args.size()))
1714 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1715 }
1716
1719 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1720 const int NumOperands =
1721 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1722 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1723
1724 return new (NumOperands, DescriptorBytes)
1725 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1726 }
1727
1728 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1729 BasicBlock::iterator InsertBefore) {
1730 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1731 InsertBefore);
1732 }
1733
1734 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1735 Instruction *InsertBefore = nullptr) {
1736 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1737 InsertBefore);
1738 }
1739
1742 const Twine &NameStr,
1743 BasicBlock::iterator InsertBefore) {
1744 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1745 NameStr, InsertBefore);
1746 }
1747
1749 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1750 const Twine &NameStr = "",
1751 Instruction *InsertBefore = nullptr) {
1752 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1753 NameStr, InsertBefore);
1754 }
1755
1757 const Twine &NameStr,
1758 BasicBlock::iterator InsertBefore) {
1759 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1760 InsertBefore);
1761 }
1762
1764 const Twine &NameStr,
1765 Instruction *InsertBefore = nullptr) {
1766 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1767 InsertBefore);
1768 }
1769
1770 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1771 BasicBlock *InsertAtEnd) {
1772 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1773 InsertAtEnd);
1774 }
1775
1777 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1778 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1779 InsertAtEnd);
1780 }
1781
1784 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1785 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1786 NameStr, InsertAtEnd);
1787 }
1788
1789 /// Create a clone of \p CI with a different set of operand bundles and
1790 /// insert it before \p InsertPt.
1791 ///
1792 /// The returned call instruction is identical \p CI in every way except that
1793 /// the operand bundles for the new instruction are set to the operand bundles
1794 /// in \p Bundles.
1796 BasicBlock::iterator InsertPt);
1798 Instruction *InsertPt = nullptr);
1799
1800 // Note that 'musttail' implies 'tail'.
1801 enum TailCallKind : unsigned {
1808
1810 static_assert(
1811 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1812 "Bitfields must be contiguous");
1813
1815 return getSubclassData<TailCallKindField>();
1816 }
1817
1818 bool isTailCall() const {
1820 return Kind == TCK_Tail || Kind == TCK_MustTail;
1821 }
1822
1823 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1824
1825 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1826
1828 setSubclassData<TailCallKindField>(TCK);
1829 }
1830
1831 void setTailCall(bool IsTc = true) {
1833 }
1834
1835 /// Return true if the call can return twice
1836 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1837 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1838
1839 // Methods for support type inquiry through isa, cast, and dyn_cast:
1840 static bool classof(const Instruction *I) {
1841 return I->getOpcode() == Instruction::Call;
1842 }
1843 static bool classof(const Value *V) {
1844 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1845 }
1846
1847 /// Updates profile metadata by scaling it by \p S / \p T.
1849
1850private:
1851 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1852 // method so that subclasses cannot accidentally use it.
1853 template <typename Bitfield>
1854 void setSubclassData(typename Bitfield::Type Value) {
1855 Instruction::setSubclassData<Bitfield>(Value);
1856 }
1857};
1858
1859CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1860 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1861 BasicBlock *InsertAtEnd)
1862 : CallBase(Ty->getReturnType(), Instruction::Call,
1863 OperandTraits<CallBase>::op_end(this) -
1864 (Args.size() + CountBundleInputs(Bundles) + 1),
1865 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1866 InsertAtEnd) {
1867 init(Ty, Func, Args, Bundles, NameStr);
1868}
1869
1870CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1871 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1872 BasicBlock::iterator InsertBefore)
1873 : CallBase(Ty->getReturnType(), Instruction::Call,
1874 OperandTraits<CallBase>::op_end(this) -
1875 (Args.size() + CountBundleInputs(Bundles) + 1),
1876 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1877 InsertBefore) {
1878 init(Ty, Func, Args, Bundles, NameStr);
1879}
1880
1881CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1882 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1883 Instruction *InsertBefore)
1884 : CallBase(Ty->getReturnType(), Instruction::Call,
1885 OperandTraits<CallBase>::op_end(this) -
1886 (Args.size() + CountBundleInputs(Bundles) + 1),
1887 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1888 InsertBefore) {
1889 init(Ty, Func, Args, Bundles, NameStr);
1890}
1891
1892//===----------------------------------------------------------------------===//
1893// SelectInst Class
1894//===----------------------------------------------------------------------===//
1895
1896/// This class represents the LLVM 'select' instruction.
1897///
1898class SelectInst : public Instruction {
1899 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1900 BasicBlock::iterator InsertBefore)
1901 : Instruction(S1->getType(), Instruction::Select, &Op<0>(), 3,
1902 InsertBefore) {
1903 init(C, S1, S2);
1904 setName(NameStr);
1905 }
1906
1907 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1908 Instruction *InsertBefore)
1909 : Instruction(S1->getType(), Instruction::Select,
1910 &Op<0>(), 3, InsertBefore) {
1911 init(C, S1, S2);
1912 setName(NameStr);
1913 }
1914
1915 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1916 BasicBlock *InsertAtEnd)
1917 : Instruction(S1->getType(), Instruction::Select,
1918 &Op<0>(), 3, InsertAtEnd) {
1919 init(C, S1, S2);
1920 setName(NameStr);
1921 }
1922
1923 void init(Value *C, Value *S1, Value *S2) {
1924 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1925 Op<0>() = C;
1926 Op<1>() = S1;
1927 Op<2>() = S2;
1928 }
1929
1930protected:
1931 // Note: Instruction needs to be a friend here to call cloneImpl.
1932 friend class Instruction;
1933
1934 SelectInst *cloneImpl() const;
1935
1936public:
1938 const Twine &NameStr,
1939 BasicBlock::iterator InsertBefore,
1940 Instruction *MDFrom = nullptr) {
1941 SelectInst *Sel = new (3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1942 if (MDFrom)
1943 Sel->copyMetadata(*MDFrom);
1944 return Sel;
1945 }
1946
1948 const Twine &NameStr = "",
1949 Instruction *InsertBefore = nullptr,
1950 Instruction *MDFrom = nullptr) {
1951 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1952 if (MDFrom)
1953 Sel->copyMetadata(*MDFrom);
1954 return Sel;
1955 }
1956
1958 const Twine &NameStr,
1959 BasicBlock *InsertAtEnd) {
1960 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1961 }
1962
1963 const Value *getCondition() const { return Op<0>(); }
1964 const Value *getTrueValue() const { return Op<1>(); }
1965 const Value *getFalseValue() const { return Op<2>(); }
1966 Value *getCondition() { return Op<0>(); }
1967 Value *getTrueValue() { return Op<1>(); }
1968 Value *getFalseValue() { return Op<2>(); }
1969
1970 void setCondition(Value *V) { Op<0>() = V; }
1971 void setTrueValue(Value *V) { Op<1>() = V; }
1972 void setFalseValue(Value *V) { Op<2>() = V; }
1973
1974 /// Swap the true and false values of the select instruction.
1975 /// This doesn't swap prof metadata.
1976 void swapValues() { Op<1>().swap(Op<2>()); }
1977
1978 /// Return a string if the specified operands are invalid
1979 /// for a select operation, otherwise return null.
1980 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1981
1982 /// Transparently provide more efficient getOperand methods.
1984
1986 return static_cast<OtherOps>(Instruction::getOpcode());
1987 }
1988
1989 // Methods for support type inquiry through isa, cast, and dyn_cast:
1990 static bool classof(const Instruction *I) {
1991 return I->getOpcode() == Instruction::Select;
1992 }
1993 static bool classof(const Value *V) {
1994 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1995 }
1996};
1997
1998template <>
1999struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2000};
2001
2003
2004//===----------------------------------------------------------------------===//
2005// VAArgInst Class
2006//===----------------------------------------------------------------------===//
2007
2008/// This class represents the va_arg llvm instruction, which returns
2009/// an argument of the specified type given a va_list and increments that list
2010///
2012protected:
2013 // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction;
2015
2016 VAArgInst *cloneImpl() const;
2017
2018public:
2019 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2020 BasicBlock::iterator InsertBefore)
2021 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2022 setName(NameStr);
2023 }
2024
2025 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
2026 Instruction *InsertBefore = nullptr)
2027 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2028 setName(NameStr);
2029 }
2030
2031 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2032 BasicBlock *InsertAtEnd)
2033 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
2034 setName(NameStr);
2035 }
2036
2037 Value *getPointerOperand() { return getOperand(0); }
2038 const Value *getPointerOperand() const { return getOperand(0); }
2039 static unsigned getPointerOperandIndex() { return 0U; }
2040
2041 // Methods for support type inquiry through isa, cast, and dyn_cast:
2042 static bool classof(const Instruction *I) {
2043 return I->getOpcode() == VAArg;
2044 }
2045 static bool classof(const Value *V) {
2046 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2047 }
2048};
2049
2050//===----------------------------------------------------------------------===//
2051// ExtractElementInst Class
2052//===----------------------------------------------------------------------===//
2053
2054/// This instruction extracts a single (scalar)
2055/// element from a VectorType value
2056///
2058 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2059 BasicBlock::iterator InsertBefore);
2060 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2061 Instruction *InsertBefore = nullptr);
2062 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2063 BasicBlock *InsertAtEnd);
2064
2065protected:
2066 // Note: Instruction needs to be a friend here to call cloneImpl.
2067 friend class Instruction;
2068
2070
2071public:
2073 const Twine &NameStr,
2074 BasicBlock::iterator InsertBefore) {
2075 return new (2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2076 }
2077
2079 const Twine &NameStr = "",
2080 Instruction *InsertBefore = nullptr) {
2081 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2082 }
2083
2085 const Twine &NameStr,
2086 BasicBlock *InsertAtEnd) {
2087 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2088 }
2089
2090 /// Return true if an extractelement instruction can be
2091 /// formed with the specified operands.
2092 static bool isValidOperands(const Value *Vec, const Value *Idx);
2093
2095 Value *getIndexOperand() { return Op<1>(); }
2096 const Value *getVectorOperand() const { return Op<0>(); }
2097 const Value *getIndexOperand() const { return Op<1>(); }
2098
2100 return cast<VectorType>(getVectorOperand()->getType());
2101 }
2102
2103 /// Transparently provide more efficient getOperand methods.
2105
2106 // Methods for support type inquiry through isa, cast, and dyn_cast:
2107 static bool classof(const Instruction *I) {
2108 return I->getOpcode() == Instruction::ExtractElement;
2109 }
2110 static bool classof(const Value *V) {
2111 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2112 }
2113};
2114
2115template <>
2117 public FixedNumOperandTraits<ExtractElementInst, 2> {
2118};
2119
2121
2122//===----------------------------------------------------------------------===//
2123// InsertElementInst Class
2124//===----------------------------------------------------------------------===//
2125
2126/// This instruction inserts a single (scalar)
2127/// element into a VectorType value
2128///
2130 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2131 BasicBlock::iterator InsertBefore);
2132 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2133 const Twine &NameStr = "",
2134 Instruction *InsertBefore = nullptr);
2135 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2136 BasicBlock *InsertAtEnd);
2137
2138protected:
2139 // Note: Instruction needs to be a friend here to call cloneImpl.
2140 friend class Instruction;
2141
2142 InsertElementInst *cloneImpl() const;
2143
2144public:
2145 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2146 const Twine &NameStr,
2147 BasicBlock::iterator InsertBefore) {
2148 return new (3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2149 }
2150
2151 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2152 const Twine &NameStr = "",
2153 Instruction *InsertBefore = nullptr) {
2154 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2155 }
2156
2157 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2158 const Twine &NameStr,
2159 BasicBlock *InsertAtEnd) {
2160 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2161 }
2162
2163 /// Return true if an insertelement instruction can be
2164 /// formed with the specified operands.
2165 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2166 const Value *Idx);
2167
2168 /// Overload to return most specific vector type.
2169 ///
2171 return cast<VectorType>(Instruction::getType());
2172 }
2173
2174 /// Transparently provide more efficient getOperand methods.
2176
2177 // Methods for support type inquiry through isa, cast, and dyn_cast:
2178 static bool classof(const Instruction *I) {
2179 return I->getOpcode() == Instruction::InsertElement;
2180 }
2181 static bool classof(const Value *V) {
2182 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2183 }
2184};
2185
2186template <>
2188 public FixedNumOperandTraits<InsertElementInst, 3> {
2189};
2190
2192
2193//===----------------------------------------------------------------------===//
2194// ShuffleVectorInst Class
2195//===----------------------------------------------------------------------===//
2196
2197constexpr int PoisonMaskElem = -1;
2198
2199/// This instruction constructs a fixed permutation of two
2200/// input vectors.
2201///
2202/// For each element of the result vector, the shuffle mask selects an element
2203/// from one of the input vectors to copy to the result. Non-negative elements
2204/// in the mask represent an index into the concatenated pair of input vectors.
2205/// PoisonMaskElem (-1) specifies that the result element is poison.
2206///
2207/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2208/// requirement may be relaxed in the future.
2210 SmallVector<int, 4> ShuffleMask;
2211 Constant *ShuffleMaskForBitcode;
2212
2213protected:
2214 // Note: Instruction needs to be a friend here to call cloneImpl.
2215 friend class Instruction;
2216
2217 ShuffleVectorInst *cloneImpl() const;
2218
2219public:
2220 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2221 BasicBlock::iterator InsertBefore);
2222 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2223 Instruction *InsertBefore = nullptr);
2224 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2225 BasicBlock *InsertAtEnd);
2226 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2227 BasicBlock::iterator InsertBefore);
2228 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2229 Instruction *InsertBefore = nullptr);
2230 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2231 BasicBlock *InsertAtEnd);
2232 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, const Twine &NameStr,
2233 BasicBlock::iterator InsertBefor);
2234 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2235 const Twine &NameStr = "",
2236 Instruction *InsertBefor = nullptr);
2237 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2238 const Twine &NameStr, BasicBlock *InsertAtEnd);
2240 const Twine &NameStr, BasicBlock::iterator InsertBefor);
2242 const Twine &NameStr = "",
2243 Instruction *InsertBefor = nullptr);
2245 const Twine &NameStr, BasicBlock *InsertAtEnd);
2246
2247 void *operator new(size_t S) { return User::operator new(S, 2); }
2248 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2249
2250 /// Swap the operands and adjust the mask to preserve the semantics
2251 /// of the instruction.
2252 void commute();
2253
2254 /// Return true if a shufflevector instruction can be
2255 /// formed with the specified operands.
2256 static bool isValidOperands(const Value *V1, const Value *V2,
2257 const Value *Mask);
2258 static bool isValidOperands(const Value *V1, const Value *V2,
2259 ArrayRef<int> Mask);
2260
2261 /// Overload to return most specific vector type.
2262 ///
2264 return cast<VectorType>(Instruction::getType());
2265 }
2266
2267 /// Transparently provide more efficient getOperand methods.
2269
2270 /// Return the shuffle mask value of this instruction for the given element
2271 /// index. Return PoisonMaskElem if the element is undef.
2272 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2273
2274 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2275 /// elements of the mask are returned as PoisonMaskElem.
2276 static void getShuffleMask(const Constant *Mask,
2277 SmallVectorImpl<int> &Result);
2278
2279 /// Return the mask for this instruction as a vector of integers. Undefined
2280 /// elements of the mask are returned as PoisonMaskElem.
2282 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2283 }
2284
2285 /// Return the mask for this instruction, for use in bitcode.
2286 ///
2287 /// TODO: This is temporary until we decide a new bitcode encoding for
2288 /// shufflevector.
2289 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2290
2291 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2292 Type *ResultTy);
2293
2294 void setShuffleMask(ArrayRef<int> Mask);
2295
2296 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2297
2298 /// Return true if this shuffle returns a vector with a different number of
2299 /// elements than its source vectors.
2300 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2301 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2302 bool changesLength() const {
2303 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2304 ->getElementCount()
2305 .getKnownMinValue();
2306 unsigned NumMaskElts = ShuffleMask.size();
2307 return NumSourceElts != NumMaskElts;
2308 }
2309
2310 /// Return true if this shuffle returns a vector with a greater number of
2311 /// elements than its source vectors.
2312 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2313 bool increasesLength() const {
2314 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2315 ->getElementCount()
2316 .getKnownMinValue();
2317 unsigned NumMaskElts = ShuffleMask.size();
2318 return NumSourceElts < NumMaskElts;
2319 }
2320
2321 /// Return true if this shuffle mask chooses elements from exactly one source
2322 /// vector.
2323 /// Example: <7,5,undef,7>
2324 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2325 /// length as the mask.
2326 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2327 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2328 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2329 SmallVector<int, 16> MaskAsInts;
2330 getShuffleMask(Mask, MaskAsInts);
2331 return isSingleSourceMask(MaskAsInts, NumSrcElts);
2332 }
2333
2334 /// Return true if this shuffle chooses elements from exactly one source
2335 /// vector without changing the length of that vector.
2336 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2337 /// TODO: Optionally allow length-changing shuffles.
2338 bool isSingleSource() const {
2339 return !changesLength() &&
2340 isSingleSourceMask(ShuffleMask, ShuffleMask.size());
2341 }
2342
2343 /// Return true if this shuffle mask chooses elements from exactly one source
2344 /// vector without lane crossings. A shuffle using this mask is not
2345 /// necessarily a no-op because it may change the number of elements from its
2346 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2347 /// Example: <undef,undef,2,3>
2348 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2349 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2350 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2351
2352 // Not possible to express a shuffle mask for a scalable vector for this
2353 // case.
2354 if (isa<ScalableVectorType>(Mask->getType()))
2355 return false;
2356
2357 SmallVector<int, 16> MaskAsInts;
2358 getShuffleMask(Mask, MaskAsInts);
2359 return isIdentityMask(MaskAsInts, NumSrcElts);
2360 }
2361
2362 /// Return true if this shuffle chooses elements from exactly one source
2363 /// vector without lane crossings and does not change the number of elements
2364 /// from its input vectors.
2365 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2366 bool isIdentity() const {
2367 // Not possible to express a shuffle mask for a scalable vector for this
2368 // case.
2369 if (isa<ScalableVectorType>(getType()))
2370 return false;
2371
2372 return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
2373 }
2374
2375 /// Return true if this shuffle lengthens exactly one source vector with
2376 /// undefs in the high elements.
2377 bool isIdentityWithPadding() const;
2378
2379 /// Return true if this shuffle extracts the first N elements of exactly one
2380 /// source vector.
2381 bool isIdentityWithExtract() const;
2382
2383 /// Return true if this shuffle concatenates its 2 source vectors. This
2384 /// returns false if either input is undefined. In that case, the shuffle is
2385 /// is better classified as an identity with padding operation.
2386 bool isConcat() const;
2387
2388 /// Return true if this shuffle mask chooses elements from its source vectors
2389 /// without lane crossings. A shuffle using this mask would be
2390 /// equivalent to a vector select with a constant condition operand.
2391 /// Example: <4,1,6,undef>
2392 /// This returns false if the mask does not choose from both input vectors.
2393 /// In that case, the shuffle is better classified as an identity shuffle.
2394 /// This assumes that vector operands are the same length as the mask
2395 /// (a length-changing shuffle can never be equivalent to a vector select).
2396 static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2397 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2398 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2399 SmallVector<int, 16> MaskAsInts;
2400 getShuffleMask(Mask, MaskAsInts);
2401 return isSelectMask(MaskAsInts, NumSrcElts);
2402 }
2403
2404 /// Return true if this shuffle chooses elements from its source vectors
2405 /// without lane crossings and all operands have the same number of elements.
2406 /// In other words, this shuffle is equivalent to a vector select with a
2407 /// constant condition operand.
2408 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2409 /// This returns false if the mask does not choose from both input vectors.
2410 /// In that case, the shuffle is better classified as an identity shuffle.
2411 /// TODO: Optionally allow length-changing shuffles.
2412 bool isSelect() const {
2413 return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
2414 }
2415
2416 /// Return true if this shuffle mask swaps the order of elements from exactly
2417 /// one source vector.
2418 /// Example: <7,6,undef,4>
2419 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2420 /// length as the mask.
2421 static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2422 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2423 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2424 SmallVector<int, 16> MaskAsInts;
2425 getShuffleMask(Mask, MaskAsInts);
2426 return isReverseMask(MaskAsInts, NumSrcElts);
2427 }
2428
2429 /// Return true if this shuffle swaps the order of elements from exactly
2430 /// one source vector.
2431 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2432 /// TODO: Optionally allow length-changing shuffles.
2433 bool isReverse() const {
2434 return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
2435 }
2436
2437 /// Return true if this shuffle mask chooses all elements with the same value
2438 /// as the first element of exactly one source vector.
2439 /// Example: <4,undef,undef,4>
2440 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2441 /// length as the mask.
2442 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2443 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2444 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2445 SmallVector<int, 16> MaskAsInts;
2446 getShuffleMask(Mask, MaskAsInts);
2447 return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
2448 }
2449
2450 /// Return true if all elements of this shuffle are the same value as the
2451 /// first element of exactly one source vector without changing the length
2452 /// of that vector.
2453 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2454 /// TODO: Optionally allow length-changing shuffles.
2455 /// TODO: Optionally allow splats from other elements.
2456 bool isZeroEltSplat() const {
2457 return !changesLength() &&
2458 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
2459 }
2460
2461 /// Return true if this shuffle mask is a transpose mask.
2462 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2463 /// even- or odd-numbered vector elements from two n-dimensional source
2464 /// vectors and write each result into consecutive elements of an
2465 /// n-dimensional destination vector. Two shuffles are necessary to complete
2466 /// the transpose, one for the even elements and another for the odd elements.
2467 /// This description closely follows how the TRN1 and TRN2 AArch64
2468 /// instructions operate.
2469 ///
2470 /// For example, a simple 2x2 matrix can be transposed with:
2471 ///
2472 /// ; Original matrix
2473 /// m0 = < a, b >
2474 /// m1 = < c, d >
2475 ///
2476 /// ; Transposed matrix
2477 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2478 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2479 ///
2480 /// For matrices having greater than n columns, the resulting nx2 transposed
2481 /// matrix is stored in two result vectors such that one vector contains
2482 /// interleaved elements from all the even-numbered rows and the other vector
2483 /// contains interleaved elements from all the odd-numbered rows. For example,
2484 /// a 2x4 matrix can be transposed with:
2485 ///
2486 /// ; Original matrix
2487 /// m0 = < a, b, c, d >
2488 /// m1 = < e, f, g, h >
2489 ///
2490 /// ; Transposed matrix
2491 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2492 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2493 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2494 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2495 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2496 SmallVector<int, 16> MaskAsInts;
2497 getShuffleMask(Mask, MaskAsInts);
2498 return isTransposeMask(MaskAsInts, NumSrcElts);
2499 }
2500
2501 /// Return true if this shuffle transposes the elements of its inputs without
2502 /// changing the length of the vectors. This operation may also be known as a
2503 /// merge or interleave. See the description for isTransposeMask() for the
2504 /// exact specification.
2505 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2506 bool isTranspose() const {
2507 return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
2508 }
2509
2510 /// Return true if this shuffle mask is a splice mask, concatenating the two
2511 /// inputs together and then extracts an original width vector starting from
2512 /// the splice index.
2513 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2514 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2515 /// length as the mask.
2516 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2517 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2518 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2519 SmallVector<int, 16> MaskAsInts;
2520 getShuffleMask(Mask, MaskAsInts);
2521 return isSpliceMask(MaskAsInts, NumSrcElts, Index);
2522 }
2523
2524 /// Return true if this shuffle splices two inputs without changing the length
2525 /// of the vectors. This operation concatenates the two inputs together and
2526 /// then extracts an original width vector starting from the splice index.
2527 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2528 bool isSplice(int &Index) const {
2529 return !changesLength() &&
2530 isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
2531 }
2532
2533 /// Return true if this shuffle mask is an extract subvector mask.
2534 /// A valid extract subvector mask returns a smaller vector from a single
2535 /// source operand. The base extraction index is returned as well.
2536 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2537 int &Index);
2538 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2539 int &Index) {
2540 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2541 // Not possible to express a shuffle mask for a scalable vector for this
2542 // case.
2543 if (isa<ScalableVectorType>(Mask->getType()))
2544 return false;
2545 SmallVector<int, 16> MaskAsInts;
2546 getShuffleMask(Mask, MaskAsInts);
2547 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2548 }
2549
2550 /// Return true if this shuffle mask is an extract subvector mask.
2552 // Not possible to express a shuffle mask for a scalable vector for this
2553 // case.
2554 if (isa<ScalableVectorType>(getType()))
2555 return false;
2556
2557 int NumSrcElts =
2558 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2559 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2560 }
2561
2562 /// Return true if this shuffle mask is an insert subvector mask.
2563 /// A valid insert subvector mask inserts the lowest elements of a second
2564 /// source operand into an in-place first source operand.
2565 /// Both the sub vector width and the insertion index is returned.
2566 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2567 int &NumSubElts, int &Index);
2568 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2569 int &NumSubElts, int &Index) {
2570 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2571 // Not possible to express a shuffle mask for a scalable vector for this
2572 // case.
2573 if (isa<ScalableVectorType>(Mask->getType()))
2574 return false;
2575 SmallVector<int, 16> MaskAsInts;
2576 getShuffleMask(Mask, MaskAsInts);
2577 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2578 }
2579
2580 /// Return true if this shuffle mask is an insert subvector mask.
2581 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2582 // Not possible to express a shuffle mask for a scalable vector for this
2583 // case.
2584 if (isa<ScalableVectorType>(getType()))
2585 return false;
2586
2587 int NumSrcElts =
2588 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2589 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2590 }
2591
2592 /// Return true if this shuffle mask replicates each of the \p VF elements
2593 /// in a vector \p ReplicationFactor times.
2594 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2595 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2596 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2597 int &VF);
2598 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2599 int &VF) {
2600 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2601 // Not possible to express a shuffle mask for a scalable vector for this
2602 // case.
2603 if (isa<ScalableVectorType>(Mask->getType()))
2604 return false;
2605 SmallVector<int, 16> MaskAsInts;
2606 getShuffleMask(Mask, MaskAsInts);
2607 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2608 }
2609
2610 /// Return true if this shuffle mask is a replication mask.
2611 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2612
2613 /// Return true if this shuffle mask represents "clustered" mask of size VF,
2614 /// i.e. each index between [0..VF) is used exactly once in each submask of
2615 /// size VF.
2616 /// For example, the mask for \p VF=4 is:
2617 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2618 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2619 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2620 /// element 3 is used twice in the second submask
2621 /// (3,3,1,0) and index 2 is not used at all.
2622 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2623
2624 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2625 /// mask.
2626 bool isOneUseSingleSourceMask(int VF) const;
2627
2628 /// Change values in a shuffle permute mask assuming the two vector operands
2629 /// of length InVecNumElts have swapped position.
2631 unsigned InVecNumElts) {
2632 for (int &Idx : Mask) {
2633 if (Idx == -1)
2634 continue;
2635 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2636 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2637 "shufflevector mask index out of range");
2638 }
2639 }
2640
2641 /// Return if this shuffle interleaves its two input vectors together.
2642 bool isInterleave(unsigned Factor);
2643
2644 /// Return true if the mask interleaves one or more input vectors together.
2645 ///
2646 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2647 /// E.g. For a Factor of 2 (LaneLen=4):
2648 /// <0, 4, 1, 5, 2, 6, 3, 7>
2649 /// E.g. For a Factor of 3 (LaneLen=4):
2650 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2651 /// E.g. For a Factor of 4 (LaneLen=2):
2652 /// <0, 2, 6, 4, 1, 3, 7, 5>
2653 ///
2654 /// NumInputElts is the total number of elements in the input vectors.
2655 ///
2656 /// StartIndexes are the first indexes of each vector being interleaved,
2657 /// substituting any indexes that were undef
2658 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2659 ///
2660 /// Note that this does not check if the input vectors are consecutive:
2661 /// It will return true for masks such as
2662 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2663 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2664 unsigned NumInputElts,
2665 SmallVectorImpl<unsigned> &StartIndexes);
2666 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2667 unsigned NumInputElts) {
2668 SmallVector<unsigned, 8> StartIndexes;
2669 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2670 }
2671
2672 /// Check if the mask is a DE-interleave mask of the given factor
2673 /// \p Factor like:
2674 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2675 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
2676 unsigned &Index);
2677 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
2678 unsigned Unused;
2679 return isDeInterleaveMaskOfFactor(Mask, Factor, Unused);
2680 }
2681
2682 /// Checks if the shuffle is a bit rotation of the first operand across
2683 /// multiple subelements, e.g:
2684 ///
2685 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2686 ///
2687 /// could be expressed as
2688 ///
2689 /// rotl <4 x i16> %a, 8
2690 ///
2691 /// If it can be expressed as a rotation, returns the number of subelements to
2692 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2693 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2694 unsigned MinSubElts, unsigned MaxSubElts,
2695 unsigned &NumSubElts, unsigned &RotateAmt);
2696
2697 // Methods for support type inquiry through isa, cast, and dyn_cast:
2698 static bool classof(const Instruction *I) {
2699 return I->getOpcode() == Instruction::ShuffleVector;
2700 }
2701 static bool classof(const Value *V) {
2702 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2703 }
2704};
2705
2706template <>
2708 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2709
2711
2712//===----------------------------------------------------------------------===//
2713// ExtractValueInst Class
2714//===----------------------------------------------------------------------===//
2715
2716/// This instruction extracts a struct member or array
2717/// element value from an aggregate value.
2718///
2721
2723
2724 /// Constructors - Create a extractvalue instruction with a base aggregate
2725 /// value and a list of indices. The first and second ctor can optionally
2726 /// insert before an existing instruction, the third appends the new
2727 /// instruction to the specified BasicBlock.
2728 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2729 const Twine &NameStr,
2730 BasicBlock::iterator InsertBefore);
2731 inline ExtractValueInst(Value *Agg,
2732 ArrayRef<unsigned> Idxs,
2733 const Twine &NameStr,
2734 Instruction *InsertBefore);
2735 inline ExtractValueInst(Value *Agg,
2736 ArrayRef<unsigned> Idxs,
2737 const Twine &NameStr, BasicBlock *InsertAtEnd);
2738
2739 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2740
2741protected:
2742 // Note: Instruction needs to be a friend here to call cloneImpl.
2743 friend class Instruction;
2744
2745 ExtractValueInst *cloneImpl() const;
2746
2747public:
2749 const Twine &NameStr,
2750 BasicBlock::iterator InsertBefore) {
2751 return new
2752 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2753 }
2754
2756 ArrayRef<unsigned> Idxs,
2757 const Twine &NameStr = "",
2758 Instruction *InsertBefore = nullptr) {
2759 return new
2760 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2761 }
2762
2764 ArrayRef<unsigned> Idxs,
2765 const Twine &NameStr,
2766 BasicBlock *InsertAtEnd) {
2767 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2768 }
2769
2770 /// Returns the type of the element that would be extracted
2771 /// with an extractvalue instruction with the specified parameters.
2772 ///
2773 /// Null is returned if the indices are invalid for the specified type.
2774 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2775
2776 using idx_iterator = const unsigned*;
2777
2778 inline idx_iterator idx_begin() const { return Indices.begin(); }
2779 inline idx_iterator idx_end() const { return Indices.end(); }
2781 return make_range(idx_begin(), idx_end());
2782 }
2783
2785 return getOperand(0);
2786 }
2788 return getOperand(0);
2789 }
2790 static unsigned getAggregateOperandIndex() {
2791 return 0U; // get index for modifying correct operand
2792 }
2793
2795 return Indices;
2796 }
2797
2798 unsigned getNumIndices() const {
2799 return (unsigned)Indices.size();
2800 }
2801
2802 bool hasIndices() const {
2803 return true;
2804 }
2805
2806 // Methods for support type inquiry through isa, cast, and dyn_cast:
2807 static bool classof(const Instruction *I) {
2808 return I->getOpcode() == Instruction::ExtractValue;
2809 }
2810 static bool classof(const Value *V) {
2811 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2812 }
2813};
2814
2815ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2816 const Twine &NameStr,
2817 BasicBlock::iterator InsertBefore)
2818 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2819 ExtractValue, Agg, InsertBefore) {
2820 init(Idxs, NameStr);
2821}
2822
2823ExtractValueInst::ExtractValueInst(Value *Agg,
2824 ArrayRef<unsigned> Idxs,
2825 const Twine &NameStr,
2826 Instruction *InsertBefore)
2827 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2828 ExtractValue, Agg, InsertBefore) {
2829 init(Idxs, NameStr);
2830}
2831
2832ExtractValueInst::ExtractValueInst(Value *Agg,
2833 ArrayRef<unsigned> Idxs,
2834 const Twine &NameStr,
2835 BasicBlock *InsertAtEnd)
2836 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2837 ExtractValue, Agg, InsertAtEnd) {
2838 init(Idxs, NameStr);
2839}
2840
//===----------------------------------------------------------------------===//
//                                InsertValueInst Class
//===----------------------------------------------------------------------===//

2845/// This instruction inserts a struct field of array element
2846/// value into an aggregate value.
2847///
2850
2851 InsertValueInst(const InsertValueInst &IVI);
2852
2853 /// Constructors - Create a insertvalue instruction with a base aggregate
2854 /// value, a value to insert, and a list of indices. The first and second ctor
2855 /// can optionally insert before an existing instruction, the third appends
2856 /// the new instruction to the specified BasicBlock.
2857 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2858 const Twine &NameStr,
2859 BasicBlock::iterator InsertBefore);
2860 inline InsertValueInst(Value *Agg, Value *Val,
2861 ArrayRef<unsigned> Idxs,
2862 const Twine &NameStr,
2863 Instruction *InsertBefore);
2864 inline InsertValueInst(Value *Agg, Value *Val,
2865 ArrayRef<unsigned> Idxs,
2866 const Twine &NameStr, BasicBlock *InsertAtEnd);
2867
2868 /// Constructors - These three constructors are convenience methods because
2869 /// one and two index insertvalue instructions are so common.
2870 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2871 BasicBlock::iterator InsertBefore);
2872 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2873 const Twine &NameStr = "",
2874 Instruction *InsertBefore = nullptr);
2875 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2876 BasicBlock *InsertAtEnd);
2877
2878 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2879 const Twine &NameStr);
2880
2881protected:
2882 // Note: Instruction needs to be a friend here to call cloneImpl.
2883 friend class Instruction;
2884
2885 InsertValueInst *cloneImpl() const;
2886
2887public:
2888 // allocate space for exactly two operands
2889 void *operator new(size_t S) { return User::operator new(S, 2); }
2890 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2891
2892 static InsertValueInst *Create(Value *Agg, Value *Val,
2893 ArrayRef<unsigned> Idxs, const Twine &NameStr,
2894 BasicBlock::iterator InsertBefore) {
2895 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2896 }
2897
2898 static InsertValueInst *Create(Value *Agg, Value *Val,
2899 ArrayRef<unsigned> Idxs,
2900 const Twine &NameStr = "",
2901 Instruction *InsertBefore = nullptr) {
2902 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2903 }
2904
2905 static InsertValueInst *Create(Value *Agg, Value *Val,
2906 ArrayRef<unsigned> Idxs,
2907 const Twine &NameStr,
2908 BasicBlock *InsertAtEnd) {
2909 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2910 }
2911
2912 /// Transparently provide more efficient getOperand methods.
2914
2915 using idx_iterator = const unsigned*;
2916
2917 inline idx_iterator idx_begin() const { return Indices.begin(); }
2918 inline idx_iterator idx_end() const { return Indices.end(); }
2920 return make_range(idx_begin(), idx_end());
2921 }
2922
2924 return getOperand(0);
2925 }
2927 return getOperand(0);
2928 }
2929 static unsigned getAggregateOperandIndex() {
2930 return 0U; // get index for modifying correct operand
2931 }
2932
2934 return getOperand(1);
2935 }
2937 return getOperand(1);
2938 }
2940 return 1U; // get index for modifying correct operand
2941 }
2942
2944 return Indices;
2945 }
2946
2947 unsigned getNumIndices() const {
2948 return (unsigned)Indices.size();
2949 }
2950
2951 bool hasIndices() const {
2952 return true;
2953 }
2954
2955 // Methods for support type inquiry through isa, cast, and dyn_cast:
2956 static bool classof(const Instruction *I) {
2957 return I->getOpcode() == Instruction::InsertValue;
2958 }
2959 static bool classof(const Value *V) {
2960 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2961 }
2962};
2963
2964template <>
2966 public FixedNumOperandTraits<InsertValueInst, 2> {
2967};
2968
2969InsertValueInst::InsertValueInst(Value *Agg,
2970 Value *Val,
2971 ArrayRef<unsigned> Idxs,
2972 const Twine &NameStr,
2973 BasicBlock::iterator InsertBefore)
2974 : Instruction(Agg->getType(), InsertValue, OperandTraits<InsertValueInst>::op_begin(this),
2975 2, InsertBefore) {
2976 init(Agg, Val, Idxs, NameStr);
2977}
2978
2979InsertValueInst::InsertValueInst(Value *Agg,
2980 Value *Val,
2981 ArrayRef<unsigned> Idxs,
2982 const Twine &NameStr,
2983 Instruction *InsertBefore)
2984 : Instruction(Agg->getType(), InsertValue,
2985 OperandTraits<InsertValueInst>::op_begin(this),
2986 2, InsertBefore) {
2987 init(Agg, Val, Idxs, NameStr);
2988}
2989
2990InsertValueInst::InsertValueInst(Value *Agg,
2991 Value *Val,
2992 ArrayRef<unsigned> Idxs,
2993 const Twine &NameStr,
2994 BasicBlock *InsertAtEnd)
2995 : Instruction(Agg->getType(), InsertValue,
2996 OperandTraits<InsertValueInst>::op_begin(this),
2997 2, InsertAtEnd) {
2998 init(Agg, Val, Idxs, NameStr);
2999}
3000
3001DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
3002
//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

3007// PHINode - The PHINode class is used to represent the magical mystical PHI
3008// node, that can not exist in nature, but can be synthesized in a computer
3009// scientist's overactive imagination.
3010//
3011class PHINode : public Instruction {
3012 /// The number of operands actually allocated. NumOperands is
3013 /// the number actually in use.
3014 unsigned ReservedSpace;
3015
3016 PHINode(const PHINode &PN);
3017
3018 explicit PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
3019 BasicBlock::iterator InsertBefore)
3020 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
3021 ReservedSpace(NumReservedValues) {
3022 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
3023 setName(NameStr);
3024 allocHungoffUses(ReservedSpace);
3025 }
3026
3027 explicit PHINode(Type *Ty, unsigned NumReservedValues,
3028 const Twine &NameStr = "",
3029 Instruction *InsertBefore = nullptr)
3030 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
3031 ReservedSpace(NumReservedValues) {
3032 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
3033 setName(NameStr);
3034 allocHungoffUses(ReservedSpace);
3035 }
3036
3037 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
3038 BasicBlock *InsertAtEnd)
3039 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
3040 ReservedSpace(NumReservedValues) {
3041 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
3042 setName(NameStr);
3043 allocHungoffUses(ReservedSpace);
3044 }
3045
3046protected:
3047 // Note: Instruction needs to be a friend here to call cloneImpl.
3048 friend class Instruction;
3049
3050 PHINode *cloneImpl() const;
3051
3052 // allocHungoffUses - this is more complicated than the generic
3053 // User::allocHungoffUses, because we have to allocate Uses for the incoming
3054 // values and pointers to the incoming blocks, all in one allocation.
3055 void allocHungoffUses(unsigned N) {
3056 User::allocHungoffUses(N, /* IsPhi */ true);
3057 }
3058
3059public:
3060 /// Constructors - NumReservedValues is a hint for the number of incoming
3061 /// edges that this phi node will have (use 0 if you really have no idea).
3062 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
3063 const Twine &NameStr,
3064 BasicBlock::iterator InsertBefore) {
3065 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
3066 }
3067
3068 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
3069 const Twine &NameStr = "",
3070 Instruction *InsertBefore = nullptr) {
3071 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
3072 }
3073
3074 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
3075 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3076 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
3077 }
3078
3079 /// Provide fast operand accessors
3081
3082 // Block iterator interface. This provides access to the list of incoming
3083 // basic blocks, which parallels the list of incoming values.
3084 // Please note that we are not providing non-const iterators for blocks to
3085 // force all updates go through an interface function.
3086
3089
3091 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
3092 }
3093
3095 return block_begin() + getNumOperands();
3096 }
3097
3099 return make_range(block_begin(), block_end());
3100 }
3101
3102 op_range incoming_values() { return operands(); }
3103
3104 const_op_range incoming_values() const { return operands(); }
3105
3106 /// Return the number of incoming edges
3107 ///
3108 unsigned getNumIncomingValues() const { return getNumOperands(); }
3109
3110 /// Return incoming value number x
3111 ///
3112 Value *getIncomingValue(unsigned i) const {
3113 return getOperand(i);
3114 }
3115 void setIncomingValue(unsigned i, Value *V) {
3116 assert(V && "PHI node got a null value!");
3117 assert(getType() == V->getType() &&
3118 "All operands to PHI node must be the same type as the PHI node!");
3119 setOperand(i, V);
3120 }
3121
3122 static unsigned getOperandNumForIncomingValue(unsigned i) {
3123 return i;
3124 }
3125
3126 static unsigned getIncomingValueNumForOperand(unsigned i) {
3127 return i;
3128 }
3129
3130 /// Return incoming basic block number @p i.
3131 ///
3132 BasicBlock *getIncomingBlock(unsigned i) const {
3133 return block_begin()[i];
3134 }
3135
3136 /// Return incoming basic block corresponding
3137 /// to an operand of the PHI.
3138 ///
3140 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
3141 return getIncomingBlock(unsigned(&U - op_begin()));
3142 }
3143
3144 /// Return incoming basic block corresponding
3145 /// to value use iterator.
3146 ///
3148 return getIncomingBlock(I.getUse());
3149 }
3150
3151 void setIncomingBlock(unsigned i, BasicBlock *BB) {
3152 const_cast<block_iterator>(block_begin())[i] = BB;
3153 }
3154
3155 /// Copies the basic blocks from \p BBRange to the incoming basic block list
3156 /// of this PHINode, starting at \p ToIdx.
3158 uint32_t ToIdx = 0) {
3159 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
3160 }
3161
3162 /// Replace every incoming basic block \p Old to basic block \p New.
3164 assert(New && Old && "PHI node got a null basic block!");
3165 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
3166 if (getIncomingBlock(Op) == Old)
3167 setIncomingBlock(Op, New);
3168 }
3169
3170 /// Add an incoming value to the end of the PHI list
3171 ///
3173 if (getNumOperands() == ReservedSpace)
3174 growOperands(); // Get more space!
3175 // Initialize some new operands.
3176 setNumHungOffUseOperands(getNumOperands() + 1);
3177 setIncomingValue(getNumOperands() - 1, V);
3178 setIncomingBlock(getNumOperands() - 1, BB);
3179 }
3180
3181 /// Remove an incoming value. This is useful if a
3182 /// predecessor basic block is deleted. The value removed is returned.
3183 ///
3184 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
3185 /// is true), the PHI node is destroyed and any uses of it are replaced with
3186 /// dummy values. The only time there should be zero incoming values to a PHI
3187 /// node is when the block is dead, so this strategy is sound.
3188 ///
3189 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
3190
3191 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
3192 int Idx = getBasicBlockIndex(BB);
3193 assert(Idx >= 0 && "Invalid basic block argument to remove!");
3194 return removeIncomingValue(Idx, DeletePHIIfEmpty);
3195 }
3196
3197 /// Remove all incoming values for which the predicate returns true.
3198 /// The predicate accepts the incoming value index.
3199 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
3200 bool DeletePHIIfEmpty = true);
3201
3202 /// Return the first index of the specified basic
3203 /// block in the value list for this PHI. Returns -1 if no instance.
3204 ///
3205 int getBasicBlockIndex(const BasicBlock *BB) const {
3206 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
3207 if (block_begin()[i] == BB)
3208 return i;
3209 return -1;
3210 }
3211
3213 int Idx = getBasicBlockIndex(BB);
3214 assert(Idx >= 0 && "Invalid basic block argument!");
3215 return getIncomingValue(Idx);
3216 }
3217
3218 /// Set every incoming value(s) for block \p BB to \p V.
3220 assert(BB && "PHI node got a null basic block!");
3221 bool Found = false;
3222 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
3223 if (getIncomingBlock(Op) == BB) {
3224 Found = true;
3225 setIncomingValue(Op, V);
3226 }
3227 (void)Found;
3228 assert(Found && "Invalid basic block argument to set!");
3229 }
3230
3231 /// If the specified PHI node always merges together the
3232 /// same value, return the value, otherwise return null.
3233 Value *hasConstantValue() const;
3234
3235 /// Whether the specified PHI node always merges
3236 /// together the same value, assuming undefs are equal to a unique
3237 /// non-undef value.
3238 bool hasConstantOrUndefValue() const;
3239
3240 /// If the PHI node is complete which means all of its parent's predecessors
3241 /// have incoming value in this PHI, return true, otherwise return false.
3242 bool isComplete() const {
3244 [this](const BasicBlock *Pred) {
3245 return getBasicBlockIndex(Pred) >= 0;
3246 });
3247 }
3248
3249 /// Methods for support type inquiry through isa, cast, and dyn_cast:
3250 static bool classof(const Instruction *I) {
3251 return I->getOpcode() == Instruction::PHI;
3252 }
3253 static bool classof(const Value *V) {
3254 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3255 }
3256
3257private:
3258 void growOperands();
3259};
3260
3261template <>
3263};
3264
3266
//===----------------------------------------------------------------------===//
//                           LandingPadInst Class
//===----------------------------------------------------------------------===//

3271//===---------------------------------------------------------------------------
3272/// The landingpad instruction holds all of the information
3273/// necessary to generate correct exception handling. The landingpad instruction
3274/// cannot be moved from the top of a landing pad block, which itself is
3275/// accessible only from the 'unwind' edge of an invoke. This uses the
3276/// SubclassData field in Value to store whether or not the landingpad is a
3277/// cleanup.
3278///
3280 using CleanupField = BoolBitfieldElementT<0>;
3281
3282 /// The number of operands actually allocated. NumOperands is
3283 /// the number actually in use.
3284 unsigned ReservedSpace;
3285
3286 LandingPadInst(const LandingPadInst &LP);
3287
3288public:
3290
3291private:
3292 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3293 const Twine &NameStr,
3294 BasicBlock::iterator InsertBefore);
3295 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3296 const Twine &NameStr, Instruction *InsertBefore);
3297 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3298 const Twine &NameStr, BasicBlock *InsertAtEnd);
3299
3300 // Allocate space for exactly zero operands.
3301 void *operator new(size_t S) { return User::operator new(S); }
3302
3303 void growOperands(unsigned Size);
3304 void init(unsigned NumReservedValues, const Twine &NameStr);
3305
3306protected:
3307 // Note: Instruction needs to be a friend here to call cloneImpl.
3308 friend class Instruction;
3309
3310 LandingPadInst *cloneImpl() const;
3311
3312public:
3313 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3314
3315 /// Constructors - NumReservedClauses is a hint for the number of incoming
3316 /// clauses that this landingpad will have (use 0 if you really have no idea).
3317 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3318 const Twine &NameStr,
3319 BasicBlock::iterator InsertBefore);
3320 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3321 const Twine &NameStr = "",
3322 Instruction *InsertBefore = nullptr);
3323 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3324 const Twine &NameStr, BasicBlock *InsertAtEnd);
3325
3326 /// Provide fast operand accessors
3328
3329 /// Return 'true' if this landingpad instruction is a
3330 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3331 /// doesn't catch the exception.
3332 bool isCleanup() const { return getSubclassData<CleanupField>(); }
3333
3334 /// Indicate that this landingpad instruction is a cleanup.
3335 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3336
3337 /// Add a catch or filter clause to the landing pad.
3338 void addClause(Constant *ClauseVal);
3339
3340 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3341 /// determine what type of clause this is.
3342 Constant *getClause(unsigned Idx) const {
3343 return cast<Constant>(getOperandList()[Idx]);
3344 }
3345
3346 /// Return 'true' if the clause and index Idx is a catch clause.
3347 bool isCatch(unsigned Idx) const {
3348 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3349 }
3350
3351 /// Return 'true' if the clause and index Idx is a filter clause.
3352 bool isFilter(unsigned Idx) const {
3353 return isa<ArrayType>(getOperandList()[Idx]->getType());
3354 }
3355
3356 /// Get the number of clauses for this landing pad.
3357 unsigned getNumClauses() const { return getNumOperands(); }
3358
3359 /// Grow the size of the operand list to accommodate the new
3360 /// number of clauses.
3361 void reserveClauses(unsigned Size) { growOperands(Size); }
3362
3363 // Methods for support type inquiry through isa, cast, and dyn_cast:
3364 static bool classof(const Instruction *I) {
3365 return I->getOpcode() == Instruction::LandingPad;
3366 }
3367 static bool classof(const Value *V) {
3368 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3369 }
3370};
3371
3372template <>
3374};
3375
3377
//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

3382//===---------------------------------------------------------------------------
3383/// Return a value (possibly void), from a function. Execution
3384/// does not continue in this function any longer.
3385///
3386class ReturnInst : public Instruction {
3387 ReturnInst(const ReturnInst &RI);
3388
3389private:
3390 // ReturnInst constructors:
3391 // ReturnInst() - 'ret void' instruction
3392 // ReturnInst( null) - 'ret void' instruction
3393 // ReturnInst(Value* X) - 'ret X' instruction
3394 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I
3395 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
3396 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3397 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3398 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3399 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3400 //
3401 // NOTE: If the Value* passed is of type void then the constructor behaves as
3402 // if it was passed NULL.
3403 explicit ReturnInst(LLVMContext &C, Value *retVal,
3404 BasicBlock::iterator InsertBefore);
3405 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3406 Instruction *InsertBefore = nullptr);
3407 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3408 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3409
3410protected:
3411 // Note: Instruction needs to be a friend here to call cloneImpl.
3412 friend class Instruction;
3413
3414 ReturnInst *cloneImpl() const;
3415
3416public:
3418 BasicBlock::iterator InsertBefore) {
3419 return new (!!retVal) ReturnInst(C, retVal, InsertBefore);
3420 }
3421
3422 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3423 Instruction *InsertBefore = nullptr) {
3424 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3425 }
3426
3428 BasicBlock *InsertAtEnd) {
3429 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3430 }
3431
3432 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3433 return new(0) ReturnInst(C, InsertAtEnd);
3434 }
3435
3436 /// Provide fast operand accessors
3438
3439 /// Convenience accessor. Returns null if there is no return value.
3441 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3442 }
3443
3444 unsigned getNumSuccessors() const { return 0; }
3445
3446 // Methods for support type inquiry through isa, cast, and dyn_cast:
3447 static bool classof(const Instruction *I) {
3448 return (I->getOpcode() == Instruction::Ret);
3449 }
3450 static bool classof(const Value *V) {
3451 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3452 }
3453
3454private:
3455 BasicBlock *getSuccessor(unsigned idx) const {
3456 llvm_unreachable("ReturnInst has no successors!");
3457 }
3458
3459 void setSuccessor(unsigned idx, BasicBlock *B) {
3460 llvm_unreachable("ReturnInst has no successors!");
3461 }
3462};
3463
3464template <>
3465struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3466};
3467
3469
//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

3474//===---------------------------------------------------------------------------
3475/// Conditional or Unconditional Branch instruction.
3476///
3477class BranchInst : public Instruction {
3478 /// Ops list - Branches are strange. The operands are ordered:
3479 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3480 /// they don't have to check for cond/uncond branchness. These are mostly
3481 /// accessed relative from op_end().
3482 BranchInst(const BranchInst &BI);
3483 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3484 // BranchInst(BB *B) - 'br B'
3485 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3486 // BranchInst(BB* B, Iter It) - 'br B' insert before I
3487 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
3488 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3489 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3490 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3491 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3492 explicit BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore);
3493 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3494 BasicBlock::iterator InsertBefore);
3495 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3496 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3497 Instruction *InsertBefore = nullptr);
3498 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3499 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3500 BasicBlock *InsertAtEnd);
3501
3502 void AssertOK();
3503
3504protected:
3505 // Note: Instruction needs to be a friend here to call cloneImpl.
3506 friend class Instruction;
3507
3508 BranchInst *cloneImpl() const;
3509
3510public:
3511 /// Iterator type that casts an operand to a basic block.
3512 ///
3513 /// This only makes sense because the successors are stored as adjacent
3514 /// operands for branch instructions.
3516 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3517 std::random_access_iterator_tag, BasicBlock *,
3518 ptrdiff_t, BasicBlock *, BasicBlock *> {
3520
3521 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3522 BasicBlock *operator->() const { return operator*(); }
3523 };
3524
3525 /// The const version of `succ_op_iterator`.
3527 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3528 std::random_access_iterator_tag,
3529 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3530 const BasicBlock *> {
3533
3534 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3535 const BasicBlock *operator->() const { return operator*(); }
3536 };
3537
3539 BasicBlock::iterator InsertBefore) {
3540 return new(1) BranchInst(IfTrue, InsertBefore);
3541 }
3542
3544 Instruction *InsertBefore = nullptr) {
3545 return new(1) BranchInst(IfTrue, InsertBefore);
3546 }
3547
3548 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3549 Value *Cond, BasicBlock::iterator InsertBefore) {
3550 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3551 }
3552
3553 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3554 Value *Cond, Instruction *InsertBefore = nullptr) {
3555 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3556 }
3557
3558 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3559 return new(1) BranchInst(IfTrue, InsertAtEnd);
3560 }
3561
3562 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3563 Value *Cond, BasicBlock *InsertAtEnd) {
3564 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3565 }
3566
3567 /// Transparently provide more efficient getOperand methods.
3569
3570 bool isUnconditional() const { return getNumOperands() == 1; }
3571 bool isConditional() const { return getNumOperands() == 3; }
3572
3574 assert(isConditional() && "Cannot get condition of an uncond branch!");
3575 return Op<-3>();
3576 }
3577
3579 assert(isConditional() && "Cannot set condition of unconditional branch!");
3580 Op<-3>() = V;
3581 }
3582
3583 unsigned getNumSuccessors() const { return 1+isConditional(); }
3584
3585 BasicBlock *getSuccessor(unsigned i) const {
3586 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3587 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3588 }
3589
3590 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3591 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3592 *(&Op<-1>() - idx) = NewSucc;
3593 }
3594
3595 /// Swap the successors of this branch instruction.
3596 ///
3597 /// Swaps the successors of the branch instruction. This also swaps any
3598 /// branch weight metadata associated with the instruction so that it
3599 /// continues to map correctly to each operand.
3600 void swapSuccessors();
3601
3603 return make_range(
3604 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3605 succ_op_iterator(value_op_end()));
3606 }
3607
3610 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3611 const_succ_op_iterator(value_op_end()));
3612 }
3613
3614 // Methods for support type inquiry through isa, cast, and dyn_cast:
3615 static bool classof(const Instruction *I) {
3616 return (I->getOpcode() == Instruction::Br);
3617 }
3618 static bool classof(const Value *V) {
3619 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3620 }
3621};
3622
3623template <>
3624struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3625};
3626
3628
//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

3633//===---------------------------------------------------------------------------
// NOTE(review): this listing was extracted from rendered documentation; lines
// that were pure cross-reference links (e.g. the CaseHandleImpl / CaseHandle /
// CaseIteratorImpl class-declaration lines, the presumable "ptrdiff_t Index;"
// member, the CaseIt/ConstCaseIt aliases, and several method signatures)
// appear to have been dropped. Confirm against upstream llvm/IR/Instructions.h
// before treating this text as compilable source.
3634/// Multiway switch
3635///
3636class SwitchInst : public Instruction {
3637 unsigned ReservedSpace;
3638
3639 // Operand[0] = Value to switch on
3640 // Operand[1] = Default basic block destination
3641 // Operand[2n ] = Value to match
3642 // Operand[2n+1] = BasicBlock to go to on match
3643 SwitchInst(const SwitchInst &SI);
3644
3645 /// Create a new switch instruction, specifying a value to switch on and a
3646 /// default destination. The number of additional cases can be specified here
3647 /// to make memory allocation more efficient. This constructor can also
3648 /// auto-insert before another instruction.
3649 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3650 BasicBlock::iterator InsertBefore);
3651
3652 /// Create a new switch instruction, specifying a value to switch on and a
3653 /// default destination. The number of additional cases can be specified here
3654 /// to make memory allocation more efficient. This constructor can also
3655 /// auto-insert before another instruction.
3656 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3657 Instruction *InsertBefore);
3658
3659 /// Create a new switch instruction, specifying a value to switch on and a
3660 /// default destination. The number of additional cases can be specified here
3661 /// to make memory allocation more efficient. This constructor also
3662 /// auto-inserts at the end of the specified BasicBlock.
3663 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3664 BasicBlock *InsertAtEnd);
3665
3666 // allocate space for exactly zero operands
3667 void *operator new(size_t S) { return User::operator new(S); }
3668
3669 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3670 void growOperands();
3671
3672protected:
3673 // Note: Instruction needs to be a friend here to call cloneImpl.
3674 friend class Instruction;
3675
3676 SwitchInst *cloneImpl() const;
3677
3678public:
3679 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3680
 /// Sentinel case index used to address the default destination through the
 /// case-handle machinery (see case_default()); numerically (unsigned)-2.
3681 // -2
3682 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3683
3684 template <typename CaseHandleT> class CaseIteratorImpl;
3685
3686 /// A handle to a particular switch case. It exposes a convenient interface
3687 /// to both the case value and the successor block.
3688 ///
3689 /// We define this as a template and instantiate it to form both a const and
3690 /// non-const handle.
3691 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
 // (stripped line here: presumably "class CaseHandleImpl {" — confirm upstream)
3693 // Directly befriend both const and non-const iterators.
3694 friend class SwitchInst::CaseIteratorImpl<
3695 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3696
3697 protected:
3698 // Expose the switch type we're parameterized with to the iterator.
3699 using SwitchInstType = SwitchInstT;
3700
3701 SwitchInstT *SI;
 // (stripped line here: presumably the "ptrdiff_t Index;" member used below)
3703
3704 CaseHandleImpl() = default;
 // (stripped line here: presumably the (SI, Index) constructor — confirm)
3706
3707 public:
3708 /// Resolves case value for current case.
3709 ConstantIntT *getCaseValue() const {
3710 assert((unsigned)Index < SI->getNumCases() &&
3711 "Index out the number of cases.");
 // Case i's match value lives at operand 2 + 2*i (after Cond and Default).
3712 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3713 }
3714
3715 /// Resolves successor for current case.
3716 BasicBlockT *getCaseSuccessor() const {
3717 assert(((unsigned)Index < SI->getNumCases() ||
3718 (unsigned)Index == DefaultPseudoIndex) &&
3719 "Index out the number of cases.");
3720 return SI->getSuccessor(getSuccessorIndex());
3721 }
3722
3723 /// Returns number of current case.
3724 unsigned getCaseIndex() const { return Index; }
3725
3726 /// Returns successor index for current case successor.
 /// Successors are ordered {Default, Case0, Case1, ...}: the default maps to
 /// successor 0 and case i maps to successor i + 1.
3727 unsigned getSuccessorIndex() const {
3728 assert(((unsigned)Index == DefaultPseudoIndex ||
3729 (unsigned)Index < SI->getNumCases()) &&
3730 "Index out the number of cases.");
3731 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3732 }
3733
3734 bool operator==(const CaseHandleImpl &RHS) const {
3735 assert(SI == RHS.SI && "Incompatible operators.");
3736 return Index == RHS.Index;
3737 }
3738 };
3739
 // (stripped lines here: presumably the ConstCaseHandle alias and the
 // "class CaseHandle" declaration — confirm upstream)
3742
3744 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3746
3747 public:
 // (stripped line here: presumably the inherited/forwarding constructor)
3749
3750 /// Sets the new value for current case.
3751 void setValue(ConstantInt *V) const {
3752 assert((unsigned)Index < SI->getNumCases() &&
3753 "Index out the number of cases.");
3754 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3755 }
3756
3757 /// Sets the new successor for current case.
3758 void setSuccessor(BasicBlock *S) const {
3759 SI->setSuccessor(getSuccessorIndex(), S);
3760 }
3761 };
3762
3763 template <typename CaseHandleT>
 // (stripped line here: presumably "class CaseIteratorImpl" — confirm upstream)
3765 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3766 std::random_access_iterator_tag,
3767 const CaseHandleT> {
3768 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3769
3770 CaseHandleT Case;
3771
3772 public:
3773 /// Default constructed iterator is in an invalid state until assigned to
3774 /// a case for a particular switch.
3775 CaseIteratorImpl() = default;
3776
3777 /// Initializes case iterator for given SwitchInst and for given
3778 /// case number.
3779 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3780
3781 /// Initializes case iterator for given SwitchInst and for given
3782 /// successor index.
 // (stripped signature line here: presumably
 // "static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI," — confirm)
3784 unsigned SuccessorIndex) {
3785 assert(SuccessorIndex < SI->getNumSuccessors() &&
3786 "Successor index # out of range!");
 // Successor 0 is the default destination; successor k > 0 is case k - 1.
3787 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3788 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3789 }
3790
3791 /// Support converting to the const variant. This will be a no-op for const
3792 /// variant.
3794 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3795 }
3796
 // (stripped operator signature here: presumably "operator+=(ptrdiff_t N)")
3798 // Check index correctness after addition.
3799 // Note: Index == getNumCases() means end().
3800 assert(Case.Index + N >= 0 &&
3801 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3802 "Case.Index out the number of cases.");
3803 Case.Index += N;
3804 return *this;
3805 }
 // (stripped operator signature here: presumably "operator-=(ptrdiff_t N)")
3807 // Check index correctness after subtraction.
3808 // Note: Case.Index == getNumCases() means end().
3809 assert(Case.Index - N >= 0 &&
3810 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3811 "Case.Index out the number of cases.");
3812 Case.Index -= N;
3813 return *this;
3814 }
 // (stripped operator signature here: presumably "operator-(const CaseIteratorImpl &RHS)")
3816 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3817 return Case.Index - RHS.Case.Index;
3818 }
3819 bool operator==(const CaseIteratorImpl &RHS) const {
3820 return Case == RHS.Case;
3821 }
3822 bool operator<(const CaseIteratorImpl &RHS) const {
3823 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3824 return Case.Index < RHS.Case.Index;
3825 }
3826 const CaseHandleT &operator*() const { return Case; }
3827 };
3828
 // (stripped lines here: presumably the CaseIt / ConstCaseIt using-aliases)
3831
 // (stripped line here: presumably "static SwitchInst *Create(Value *Value, BasicBlock *Default,")
3833 unsigned NumCases,
3834 BasicBlock::iterator InsertBefore) {
3835 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3836 }
3837
 // (stripped line here: another "static SwitchInst *Create(..." overload header)
3839 unsigned NumCases,
3840 Instruction *InsertBefore = nullptr) {
3841 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3842 }
3843
 // (stripped line here: another "static SwitchInst *Create(..." overload header)
3845 unsigned NumCases, BasicBlock *InsertAtEnd) {
3846 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3847 }
3848
3849 /// Provide fast operand accessors
 // (stripped line here: presumably DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) — confirm)
3851
3852 // Accessor Methods for Switch stmt
3853 Value *getCondition() const { return getOperand(0); }
3854 void setCondition(Value *V) { setOperand(0, V); }
3855
 // (stripped line here: presumably "BasicBlock *getDefaultDest() const {")
3857 return cast<BasicBlock>(getOperand(1));
3858 }
3859
3860 /// Returns true if the default branch must result in immediate undefined
3861 /// behavior, false otherwise.
 // (stripped signature line here; body checks the default block starts with 'unreachable')
3863 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3864 }
3865
3866 void setDefaultDest(BasicBlock *DefaultCase) {
3867 setOperand(1, reinterpret_cast<Value*>(DefaultCase))
3868 }
3869
3870 /// Return the number of 'cases' in this switch instruction, excluding the
3871 /// default case.
 // getNumOperands() == 2 + 2*NumCases ({Cond, Default} plus a (value, dest)
 // pair per case), hence the arithmetic below.
3872 unsigned getNumCases() const {
3873 return getNumOperands()/2 - 1;
3874 }
3875
3876 /// Returns a read/write iterator that points to the first case in the
3877 /// SwitchInst.
3879 return CaseIt(this, 0);
3880 }
3881
3882 /// Returns a read-only iterator that points to the first case in the
3883 /// SwitchInst.
3885 return ConstCaseIt(this, 0);
3886 }
3887
3888 /// Returns a read/write iterator that points one past the last in the
3889 /// SwitchInst.
3891 return CaseIt(this, getNumCases());
3892 }
3893
3894 /// Returns a read-only iterator that points one past the last in the
3895 /// SwitchInst.
3897 return ConstCaseIt(this, getNumCases());
3898 }
3899
3900 /// Iteration adapter for range-for loops.
3902 return make_range(case_begin(), case_end());
3903 }
3904
3905 /// Constant iteration adapter for range-for loops.
3907 return make_range(case_begin(), case_end());
3908 }
3909
3910 /// Returns an iterator that points to the default case.
3911 /// Note: this iterator allows to resolve successor only. Attempt
3912 /// to resolve case value causes an assertion.
3913 /// Also note, that increment and decrement also causes an assertion and
3914 /// makes iterator invalid.
3916 return CaseIt(this, DefaultPseudoIndex);
3917 }
3919 return ConstCaseIt(this, DefaultPseudoIndex);
3920 }
3921
3922 /// Search all of the case values for the specified constant. If it is
3923 /// explicitly handled, return the case iterator of it, otherwise return
3924 /// default case iterator to indicate that it is handled by the default
3925 /// handler.
 // Non-const overload delegates to the const overload below.
3927 return CaseIt(
3928 this,
3929 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3930 }
 // (stripped signature line here: the const findCaseValue overload header)
3932 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3933 return Case.getCaseValue() == C;
3934 });
3935 if (I != case_end())
3936 return I;
3937
3938 return case_default();
3939 }
3940
3941 /// Finds the unique case value for a given successor. Returns null if the
3942 /// successor is not found, not unique, or is the default case.
 // (stripped signature line here: presumably "ConstantInt *findCaseDest(BasicBlock *BB) {")
3944 if (BB == getDefaultDest())
3945 return nullptr;
3946
3947 ConstantInt *CI = nullptr;
3948 for (auto Case : cases()) {
3949 if (Case.getCaseSuccessor() != BB)
3950 continue;
3951
3952 if (CI)
3953 return nullptr; // Multiple cases lead to BB.
3954
3955 CI = Case.getCaseValue();
3956 }
3957
3958 return CI;
3959 }
3960
3961 /// Add an entry to the switch instruction.
3962 /// Note:
3963 /// This action invalidates case_end(). Old case_end() iterator will
3964 /// point to the added case.
3965 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3966
3967 /// This method removes the specified case and its successor from the switch
3968 /// instruction. Note that this operation may reorder the remaining cases at
3969 /// index idx and above.
3970 /// Note:
3971 /// This action invalidates iterators for all cases following the one removed,
3972 /// including the case_end() iterator. It returns an iterator for the next
3973 /// case.
3974 CaseIt removeCase(CaseIt I);
3975
 // Successor count includes the default destination (operands / 2).
3976 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3977 BasicBlock *getSuccessor(unsigned idx) const {
3978 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
 // Successor idx lives at operand 2*idx + 1 (idx 0 is the default dest).
3979 return cast<BasicBlock>(getOperand(idx*2+1));
3980 }
3981 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3982 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3983 setOperand(idx * 2 + 1, NewSucc);
3984 }
3985
3986 // Methods for support type inquiry through isa, cast, and dyn_cast:
3987 static bool classof(const Instruction *I) {
3988 return I->getOpcode() == Instruction::Switch;
3989 }
3990 static bool classof(const Value *V) {
3991 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3992 }
3993};
3994
// NOTE(review): the class declaration line for SwitchInstProfUpdateWrapper and
// several member declaration lines (constructor, destructor header, removeCase,
// eraseFromParent) appear to have been dropped by the documentation
// extraction — confirm against upstream llvm/IR/Instructions.h.
3995/// A wrapper class to simplify modification of SwitchInst cases along with
3996/// their prof branch_weights metadata.
 // Wrapped instruction; exposed via operator->/operator*/conversion below.
3998 SwitchInst &SI;
 // Cached per-successor branch weights; std::nullopt when the instruction
 // carries no (or unusable) prof metadata.
3999 std::optional<SmallVector<uint32_t, 8>> Weights;
 // Set when weights are modified so the destructor knows to write them back.
4000 bool Changed = false;
4001
4002protected:
 // (stripped line here: presumably a protected member declaration — confirm)
4004
4005 void init();
4006
4007public:
4008 using CaseWeightOpt = std::optional<uint32_t>;
4009 SwitchInst *operator->() { return &SI; }
4010 SwitchInst &operator*() { return SI; }
4011 operator SwitchInst *() { return &SI; }
4012
 // (stripped line here: presumably the constructor taking a SwitchInst&)
4014
 // (stripped destructor header here.) On destruction, the (possibly modified)
 // weights are written back into the instruction's prof metadata — but only
 // if something actually changed.
4016 if (Changed)
4017 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
4018 }
4019
4020 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
4021 /// correspondent branch weight.
 // (stripped line here: presumably "SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);")
4023
4024 /// Delegate the call to the underlying SwitchInst::addCase() and set the
4025 /// specified branch weight for the added case.
4026 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
4027
4028 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
4029 /// this object to not touch the underlying SwitchInst in destructor.
 // (stripped line here: presumably "SymbolTableList<Instruction>::iterator eraseFromParent();")
4031
4032 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
4033 CaseWeightOpt getSuccessorWeight(unsigned idx);
4034
4035 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
4036};
4037
// NOTE(review): the OperandTraits<SwitchInst> specialization line (and the
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS invocation that normally follows the
// closing brace) are missing from this extraction — confirm against upstream.
4038template <>
4040};
4041
4043
4044//===----------------------------------------------------------------------===//
4045// IndirectBrInst Class
4046//===----------------------------------------------------------------------===//
4047
4048//===---------------------------------------------------------------------------
4049/// Indirect Branch Instruction.
4050///
 // NOTE(review): the class declaration line ("class IndirectBrInst : public
 // Instruction {", presumably) was dropped by the documentation extraction,
 // as were a few other link-only lines flagged below — confirm upstream.
4052 unsigned ReservedSpace;
4053
4054 // Operand[0] = Address to jump to
4055 // Operand[n+1] = n-th destination
4056 IndirectBrInst(const IndirectBrInst &IBI);
4057
4058 /// Create a new indirectbr instruction, specifying an
4059 /// Address to jump to. The number of expected destinations can be specified
4060 /// here to make memory allocation more efficient. This constructor can also
4061 /// autoinsert before another instruction.
4062 IndirectBrInst(Value *Address, unsigned NumDests,
4063 BasicBlock::iterator InsertBefore);
4064
4065 /// Create a new indirectbr instruction, specifying an
4066 /// Address to jump to. The number of expected destinations can be specified
4067 /// here to make memory allocation more efficient. This constructor can also
4068 /// autoinsert before another instruction.
4069 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
4070
4071 /// Create a new indirectbr instruction, specifying an
4072 /// Address to jump to. The number of expected destinations can be specified
4073 /// here to make memory allocation more efficient. This constructor also
4074 /// autoinserts at the end of the specified BasicBlock.
4075 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
4076
4077 // allocate space for exactly zero operands
4078 void *operator new(size_t S) { return User::operator new(S); }
4079
4080 void init(Value *Address, unsigned NumDests);
4081 void growOperands();
4082
4083protected:
4084 // Note: Instruction needs to be a friend here to call cloneImpl.
4085 friend class Instruction;
4086
4087 IndirectBrInst *cloneImpl() const;
4088
4089public:
4090 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4091
4092 /// Iterator type that casts an operand to a basic block.
4093 ///
4094 /// This only makes sense because the successors are stored as adjacent
4095 /// operands for indirectbr instructions.
 // (stripped line here: presumably "struct succ_op_iterator" — confirm)
4097 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
4098 std::random_access_iterator_tag, BasicBlock *,
4099 ptrdiff_t, BasicBlock *, BasicBlock *> {
 // (stripped line here: presumably the explicit constructor from value_op_iterator)
4101
4102 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
4103 BasicBlock *operator->() const { return operator*(); }
4104 };
4105
4106 /// The const version of `succ_op_iterator`.
 // (stripped line here: presumably "struct const_succ_op_iterator" — confirm)
4108 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
4109 std::random_access_iterator_tag,
4110 const BasicBlock *, ptrdiff_t, const BasicBlock *,
4111 const BasicBlock *> {
 // (stripped line here: presumably the explicit constructor from const_value_op_iterator)
4114
4115 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
4116 const BasicBlock *operator->() const { return operator*(); }
4117 };
4118
4119 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
4120 BasicBlock::iterator InsertBefore) {
4121 return new IndirectBrInst(Address, NumDests, InsertBefore);
4122 }
4123
4124 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
4125 Instruction *InsertBefore = nullptr) {
4126 return new IndirectBrInst(Address, NumDests, InsertBefore);
4127 }
4128
4129 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
4130 BasicBlock *InsertAtEnd) {
4131 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
4132 }
4133
4134 /// Provide fast operand accessors.
 // (stripped line here: presumably DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) — confirm)
4136
4137 // Accessor Methods for IndirectBrInst instruction.
4138 Value *getAddress() { return getOperand(0); }
4139 const Value *getAddress() const { return getOperand(0); }
4140 void setAddress(Value *V) { setOperand(0, V); }
4141
4142 /// return the number of possible destinations in this
4143 /// indirectbr instruction.
 // Operand 0 is the jump address; every remaining operand is a destination.
4144 unsigned getNumDestinations() const { return getNumOperands()-1; }
4145
4146 /// Return the specified destination.
4147 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
4148 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
4149
4150 /// Add a destination.
4151 ///
4152 void addDestination(BasicBlock *Dest);
4153
4154 /// This method removes the specified successor from the
4155 /// indirectbr instruction.
4156 void removeDestination(unsigned i);
4157
4158 unsigned getNumSuccessors() const { return getNumOperands()-1; }
4159 BasicBlock *getSuccessor(unsigned i) const {
4160 return cast<BasicBlock>(getOperand(i+1));
4161 }
4162 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4163 setOperand(i + 1, NewSucc);
4164 }
4165
 // (stripped signature line here: presumably "iterator_range<succ_op_iterator> successors() {".)
 // std::next skips operand 0 (the address) so only destinations are iterated.
4167 return make_range(succ_op_iterator(std::next(value_op_begin())),
4168 succ_op_iterator(value_op_end()));
4169 }
4170
 // (stripped signature line here: presumably the const successors() overload)
4172 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
4173 const_succ_op_iterator(value_op_end()));
4174 }
4175
4176 // Methods for support type inquiry through isa, cast, and dyn_cast:
4177 static bool classof(const Instruction *I) {
4178 return I->getOpcode() == Instruction::IndirectBr;
4179 }
4180 static bool classof(const Value *V) {
4181 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4182 }
4183};
4184
// NOTE(review): as with the SwitchInst traits above, the
// OperandTraits<IndirectBrInst> specialization line (and the following
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS invocation) appear stripped — confirm
// against upstream llvm/IR/Instructions.h.
4185template <>
4187};
4188
4190
4191//===----------------------------------------------------------------------===//
4192// InvokeInst Class
4193//===----------------------------------------------------------------------===//
4194
4195/// Invoke instruction. The SubclassData field is used to hold the
4196/// calling convention of the call.
4197///
4198class InvokeInst : public CallBase {
4199 /// The number of operands for this call beyond the called function,
4200 /// arguments, and operand bundles.
4201 static constexpr int NumExtraOperands = 2;
4202
4203 /// The index from the end of the operand array to the normal destination.
4204 static constexpr int NormalDestOpEndIdx = -3;
4205
4206 /// The index from the end of the operand array to the unwind destination.
4207 static constexpr int UnwindDestOpEndIdx = -2;
4208
4209 InvokeInst(const InvokeInst &BI);
4210
4211 /// Construct an InvokeInst given a range of arguments.
4212 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4213 BasicBlock *IfException, ArrayRef<Value *> Args,
4214 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4215 const Twine &NameStr, BasicBlock::iterator InsertBefore);
4216
4217 /// Construct an InvokeInst given a range of arguments.
4218 ///
4219 /// Construct an InvokeInst from a range of arguments
4220 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4221 BasicBlock *IfException, ArrayRef<Value *> Args,
4222 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4223 const Twine &NameStr, Instruction *InsertBefore);
4224
4225 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4226 BasicBlock *IfException, ArrayRef<Value *> Args,
4227 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4228 const Twine &NameStr, BasicBlock *InsertAtEnd);
4229
4230 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4231 BasicBlock *IfException, ArrayRef<Value *> Args,
4232 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4233
4234 /// Compute the number of operands to allocate.
4235 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
4236 // We need one operand for the called function, plus our extra operands and
4237 // the input operand counts provided.
4238 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
4239 }
4240
4241protected:
4242 // Note: Instruction needs to be a friend here to call cloneImpl.
4243 friend class Instruction;
4244
4245 InvokeInst *cloneImpl() const;
4246
4247public:
4248 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4249 BasicBlock *IfException, ArrayRef<Value *> Args,
4250 const Twine &NameStr,
4251 BasicBlock::iterator InsertBefore) {
4252 int NumOperands = ComputeNumOperands(Args.size());
4253 return new (NumOperands)
4254 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
4255 NumOperands, NameStr, InsertBefore);
4256 }
4257
4258 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4259 BasicBlock *IfException, ArrayRef<Value *> Args,
4260 const Twine &NameStr,
4261 Instruction *InsertBefore = nullptr) {
4262 int NumOperands = ComputeNumOperands(Args.size());
4263 return new (NumOperands)
4264 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
4265 NumOperands, NameStr, InsertBefore);
4266 }
4267
4268 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4269 BasicBlock *IfException, ArrayRef<Value *> Args,
4271 const Twine &NameStr,
4272 BasicBlock::iterator InsertBefore) {
4273 int NumOperands =
4274 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
4275 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4276
4277 return new (NumOperands, DescriptorBytes)
4278 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
4279 NameStr, InsertBefore);
4280 }
4281
4282 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4283 BasicBlock *IfException, ArrayRef<Value *> Args,
4284 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4285 const Twine &NameStr = "",
4286 Instruction *InsertBefore = nullptr) {
4287 int NumOperands =
4288 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
4289 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4290
4291 return new (NumOperands, DescriptorBytes)
4292 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
4293 NameStr, InsertBefore);
4294 }
4295
4296 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4297 BasicBlock *IfException, ArrayRef<Value *> Args,
4298 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4299 int NumOperands = ComputeNumOperands(Args.size());
4300 return new (NumOperands)
4301 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
4302 NumOperands, NameStr, InsertAtEnd);
4303 }
4304
4305 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4306 BasicBlock *IfException, ArrayRef<Value *> Args,
4308 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4309 int NumOperands =
4310 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
4311 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4312
4313 return new (NumOperands, DescriptorBytes)
4314 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
4315 NameStr, InsertAtEnd);
4316 }
4317
4319 BasicBlock *IfException, ArrayRef<Value *> Args,
4320 const Twine &NameStr,
4321 BasicBlock::iterator InsertBefore) {
4322 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
4323 IfException, Args, std::nullopt, NameStr, InsertBefore);
4324 }
4325
4327 BasicBlock *IfException, ArrayRef<Value *> Args,
4328 const Twine &NameStr,
4329 Instruction *InsertBefore = nullptr) {
4330 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
4331 IfException, Args, std::nullopt, NameStr, InsertBefore);
4332 }
4333
4335 BasicBlock *IfException, ArrayRef<Value *> Args,
4337 const Twine &NameStr,
4338 BasicBlock::iterator InsertBefore) {
4339 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
4340 IfException, Args, Bundles, NameStr, InsertBefore);
4341 }
4342
4344 BasicBlock *IfException, ArrayRef<Value *> Args,
4345 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4346 const Twine &NameStr = "",
4347 Instruction *InsertBefore = nullptr) {
4348 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
4349 IfException, Args, Bundles, NameStr, InsertBefore);
4350 }
4351
4353 BasicBlock *IfException, ArrayRef<Value *> Args,
4354 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4355 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
4356 IfException, Args, NameStr, InsertAtEnd);
4357 }
4358
4360 BasicBlock *IfException, ArrayRef<Value *> Args,
4362 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4363 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
4364 IfException, Args, Bundles, NameStr, InsertAtEnd);
4365 }
4366
4367 /// Create a clone of \p II with a different set of operand bundles and
4368 /// insert it before \p InsertPt.
4369 ///
4370 /// The returned invoke instruction is identical to \p II in every way except
4371 /// that the operand bundles for the new instruction are set to the operand
4372 /// bundles in \p Bundles.
4373 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
4374 BasicBlock::iterator InsertPt);
4375 static