// llvm/IR/Instructions.h — LLVM 15.0.0git (reconstructed from a Doxygen
// source listing; stray page text converted to this comment).
1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file exposes the class definitions of all of the subclasses of the
10 // Instruction class. This is meant to be an easy way to get access to all
11 // instruction subclasses.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_IR_INSTRUCTIONS_H
16 #define LLVM_IR_INSTRUCTIONS_H
17 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
41 
42 namespace llvm {
43 
44 class APFloat;
45 class APInt;
46 class BasicBlock;
47 class ConstantInt;
48 class DataLayout;
49 class StringRef;
50 class Type;
51 class Value;
52 
53 //===----------------------------------------------------------------------===//
54 // AllocaInst Class
55 //===----------------------------------------------------------------------===//
56 
57 /// an instruction to allocate memory on the stack
58 class AllocaInst : public UnaryInstruction {
59  Type *AllocatedType;
60 
61  using AlignmentField = AlignmentBitfieldElementT<0>;
62  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65  SwiftErrorField>(),
66  "Bitfields must be contiguous");
67 
68 protected:
69  // Note: Instruction needs to be a friend here to call cloneImpl.
70  friend class Instruction;
71 
72  AllocaInst *cloneImpl() const;
73 
74 public:
75  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76  const Twine &Name, Instruction *InsertBefore);
77  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78  const Twine &Name, BasicBlock *InsertAtEnd);
79 
80  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81  Instruction *InsertBefore);
82  AllocaInst(Type *Ty, unsigned AddrSpace,
83  const Twine &Name, BasicBlock *InsertAtEnd);
84 
85  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86  const Twine &Name = "", Instruction *InsertBefore = nullptr);
87  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88  const Twine &Name, BasicBlock *InsertAtEnd);
89 
90  /// Return true if there is an allocation size parameter to the allocation
91  /// instruction that is not 1.
92  bool isArrayAllocation() const;
93 
94  /// Get the number of elements allocated. For a simple allocation of a single
95  /// element, this will return a constant 1 value.
96  const Value *getArraySize() const { return getOperand(0); }
97  Value *getArraySize() { return getOperand(0); }
98 
99  /// Overload to return most specific pointer type.
100  PointerType *getType() const {
101  return cast<PointerType>(Instruction::getType());
102  }
103 
104  /// Return the address space for the allocation.
105  unsigned getAddressSpace() const {
106  return getType()->getAddressSpace();
107  }
108 
109  /// Get allocation size in bits. Returns None if size can't be determined,
110  /// e.g. in case of a VLA.
112 
113  /// Return the type that is being allocated by the instruction.
114  Type *getAllocatedType() const { return AllocatedType; }
115  /// for use only in special circumstances that need to generically
116  /// transform a whole instruction (eg: IR linking and vectorization).
117  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
118 
119  /// Return the alignment of the memory that is being allocated by the
120  /// instruction.
121  Align getAlign() const {
122  return Align(1ULL << getSubclassData<AlignmentField>());
123  }
124 
126  setSubclassData<AlignmentField>(Log2(Align));
127  }
128 
129  /// Return true if this alloca is in the entry block of the function and is a
130  /// constant size. If so, the code generator will fold it into the
131  /// prolog/epilog code, so it is basically free.
132  bool isStaticAlloca() const;
133 
134  /// Return true if this alloca is used as an inalloca argument to a call. Such
135  /// allocas are never considered static even if they are in the entry block.
136  bool isUsedWithInAlloca() const {
137  return getSubclassData<UsedWithInAllocaField>();
138  }
139 
140  /// Specify whether this alloca is used to represent the arguments to a call.
141  void setUsedWithInAlloca(bool V) {
142  setSubclassData<UsedWithInAllocaField>(V);
143  }
144 
145  /// Return true if this alloca is used as a swifterror argument to a call.
146  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
147  /// Specify whether this alloca is used to represent a swifterror.
148  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
149 
150  // Methods for support type inquiry through isa, cast, and dyn_cast:
151  static bool classof(const Instruction *I) {
152  return (I->getOpcode() == Instruction::Alloca);
153  }
154  static bool classof(const Value *V) {
155  return isa<Instruction>(V) && classof(cast<Instruction>(V));
156  }
157 
158 private:
159  // Shadow Instruction::setInstructionSubclassData with a private forwarding
160  // method so that subclasses cannot accidentally use it.
161  template <typename Bitfield>
162  void setSubclassData(typename Bitfield::Type Value) {
163  Instruction::setSubclassData<Bitfield>(Value);
164  }
165 };
166 
167 //===----------------------------------------------------------------------===//
168 // LoadInst Class
169 //===----------------------------------------------------------------------===//
170 
171 /// An instruction for reading from memory. This uses the SubclassData field in
172 /// Value to store whether or not the load is volatile.
173 class LoadInst : public UnaryInstruction {
174  using VolatileField = BoolBitfieldElementT<0>;
177  static_assert(
178  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
179  "Bitfields must be contiguous");
180 
181  void AssertOK();
182 
183 protected:
184  // Note: Instruction needs to be a friend here to call cloneImpl.
185  friend class Instruction;
186 
187  LoadInst *cloneImpl() const;
188 
189 public:
190  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
191  Instruction *InsertBefore);
192  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
193  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
194  Instruction *InsertBefore);
195  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196  BasicBlock *InsertAtEnd);
197  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198  Align Align, Instruction *InsertBefore = nullptr);
199  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200  Align Align, BasicBlock *InsertAtEnd);
201  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202  Align Align, AtomicOrdering Order,
204  Instruction *InsertBefore = nullptr);
205  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207  BasicBlock *InsertAtEnd);
208 
209  /// Return true if this is a load from a volatile memory location.
210  bool isVolatile() const { return getSubclassData<VolatileField>(); }
211 
212  /// Specify whether this is a volatile load or not.
213  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
214 
215  /// Return the alignment of the access that is being performed.
216  Align getAlign() const {
217  return Align(1ULL << (getSubclassData<AlignmentField>()));
218  }
219 
221  setSubclassData<AlignmentField>(Log2(Align));
222  }
223 
224  /// Returns the ordering constraint of this load instruction.
226  return getSubclassData<OrderingField>();
227  }
228  /// Sets the ordering constraint of this load instruction. May not be Release
229  /// or AcquireRelease.
230  void setOrdering(AtomicOrdering Ordering) {
231  setSubclassData<OrderingField>(Ordering);
232  }
233 
234  /// Returns the synchronization scope ID of this load instruction.
236  return SSID;
237  }
238 
239  /// Sets the synchronization scope ID of this load instruction.
241  this->SSID = SSID;
242  }
243 
244  /// Sets the ordering constraint and the synchronization scope ID of this load
245  /// instruction.
246  void setAtomic(AtomicOrdering Ordering,
248  setOrdering(Ordering);
249  setSyncScopeID(SSID);
250  }
251 
252  bool isSimple() const { return !isAtomic() && !isVolatile(); }
253 
254  bool isUnordered() const {
255  return (getOrdering() == AtomicOrdering::NotAtomic ||
257  !isVolatile();
258  }
259 
261  const Value *getPointerOperand() const { return getOperand(0); }
262  static unsigned getPointerOperandIndex() { return 0U; }
264 
265  /// Returns the address space of the pointer operand.
266  unsigned getPointerAddressSpace() const {
268  }
269 
270  // Methods for support type inquiry through isa, cast, and dyn_cast:
271  static bool classof(const Instruction *I) {
272  return I->getOpcode() == Instruction::Load;
273  }
274  static bool classof(const Value *V) {
275  return isa<Instruction>(V) && classof(cast<Instruction>(V));
276  }
277 
278 private:
279  // Shadow Instruction::setInstructionSubclassData with a private forwarding
280  // method so that subclasses cannot accidentally use it.
281  template <typename Bitfield>
282  void setSubclassData(typename Bitfield::Type Value) {
283  Instruction::setSubclassData<Bitfield>(Value);
284  }
285 
286  /// The synchronization scope ID of this load instruction. Not quite enough
287  /// room in SubClassData for everything, so synchronization scope ID gets its
288  /// own field.
289  SyncScope::ID SSID;
290 };
291 
292 //===----------------------------------------------------------------------===//
293 // StoreInst Class
294 //===----------------------------------------------------------------------===//
295 
296 /// An instruction for storing to memory.
297 class StoreInst : public Instruction {
298  using VolatileField = BoolBitfieldElementT<0>;
301  static_assert(
302  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
303  "Bitfields must be contiguous");
304 
305  void AssertOK();
306 
307 protected:
308  // Note: Instruction needs to be a friend here to call cloneImpl.
309  friend class Instruction;
310 
311  StoreInst *cloneImpl() const;
312 
313 public:
314  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
315  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
316  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
317  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
318  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
319  Instruction *InsertBefore = nullptr);
320  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
321  BasicBlock *InsertAtEnd);
322  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
324  Instruction *InsertBefore = nullptr);
325  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326  AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
327 
328  // allocate space for exactly two operands
329  void *operator new(size_t S) { return User::operator new(S, 2); }
330  void operator delete(void *Ptr) { User::operator delete(Ptr); }
331 
332  /// Return true if this is a store to a volatile memory location.
333  bool isVolatile() const { return getSubclassData<VolatileField>(); }
334 
335  /// Specify whether this is a volatile store or not.
336  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
337 
338  /// Transparently provide more efficient getOperand methods.
340 
341  Align getAlign() const {
342  return Align(1ULL << (getSubclassData<AlignmentField>()));
343  }
344 
346  setSubclassData<AlignmentField>(Log2(Align));
347  }
348 
349  /// Returns the ordering constraint of this store instruction.
351  return getSubclassData<OrderingField>();
352  }
353 
354  /// Sets the ordering constraint of this store instruction. May not be
355  /// Acquire or AcquireRelease.
356  void setOrdering(AtomicOrdering Ordering) {
357  setSubclassData<OrderingField>(Ordering);
358  }
359 
360  /// Returns the synchronization scope ID of this store instruction.
362  return SSID;
363  }
364 
365  /// Sets the synchronization scope ID of this store instruction.
367  this->SSID = SSID;
368  }
369 
370  /// Sets the ordering constraint and the synchronization scope ID of this
371  /// store instruction.
372  void setAtomic(AtomicOrdering Ordering,
374  setOrdering(Ordering);
375  setSyncScopeID(SSID);
376  }
377 
378  bool isSimple() const { return !isAtomic() && !isVolatile(); }
379 
380  bool isUnordered() const {
381  return (getOrdering() == AtomicOrdering::NotAtomic ||
383  !isVolatile();
384  }
385 
386  Value *getValueOperand() { return getOperand(0); }
387  const Value *getValueOperand() const { return getOperand(0); }
388 
390  const Value *getPointerOperand() const { return getOperand(1); }
391  static unsigned getPointerOperandIndex() { return 1U; }
393 
394  /// Returns the address space of the pointer operand.
395  unsigned getPointerAddressSpace() const {
397  }
398 
399  // Methods for support type inquiry through isa, cast, and dyn_cast:
400  static bool classof(const Instruction *I) {
401  return I->getOpcode() == Instruction::Store;
402  }
403  static bool classof(const Value *V) {
404  return isa<Instruction>(V) && classof(cast<Instruction>(V));
405  }
406 
407 private:
408  // Shadow Instruction::setInstructionSubclassData with a private forwarding
409  // method so that subclasses cannot accidentally use it.
410  template <typename Bitfield>
411  void setSubclassData(typename Bitfield::Type Value) {
412  Instruction::setSubclassData<Bitfield>(Value);
413  }
414 
415  /// The synchronization scope ID of this store instruction. Not quite enough
416  /// room in SubClassData for everything, so synchronization scope ID gets its
417  /// own field.
418  SyncScope::ID SSID;
419 };
420 
421 template <>
422 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
423 };
424 
426 
427 //===----------------------------------------------------------------------===//
428 // FenceInst Class
429 //===----------------------------------------------------------------------===//
430 
431 /// An instruction for ordering other memory operations.
432 class FenceInst : public Instruction {
433  using OrderingField = AtomicOrderingBitfieldElementT<0>;
434 
435  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
436 
437 protected:
438  // Note: Instruction needs to be a friend here to call cloneImpl.
439  friend class Instruction;
440 
441  FenceInst *cloneImpl() const;
442 
443 public:
444  // Ordering may only be Acquire, Release, AcquireRelease, or
445  // SequentiallyConsistent.
448  Instruction *InsertBefore = nullptr);
450  BasicBlock *InsertAtEnd);
451 
452  // allocate space for exactly zero operands
453  void *operator new(size_t S) { return User::operator new(S, 0); }
454  void operator delete(void *Ptr) { User::operator delete(Ptr); }
455 
456  /// Returns the ordering constraint of this fence instruction.
458  return getSubclassData<OrderingField>();
459  }
460 
461  /// Sets the ordering constraint of this fence instruction. May only be
462  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
463  void setOrdering(AtomicOrdering Ordering) {
464  setSubclassData<OrderingField>(Ordering);
465  }
466 
467  /// Returns the synchronization scope ID of this fence instruction.
469  return SSID;
470  }
471 
472  /// Sets the synchronization scope ID of this fence instruction.
474  this->SSID = SSID;
475  }
476 
477  // Methods for support type inquiry through isa, cast, and dyn_cast:
478  static bool classof(const Instruction *I) {
479  return I->getOpcode() == Instruction::Fence;
480  }
481  static bool classof(const Value *V) {
482  return isa<Instruction>(V) && classof(cast<Instruction>(V));
483  }
484 
485 private:
486  // Shadow Instruction::setInstructionSubclassData with a private forwarding
487  // method so that subclasses cannot accidentally use it.
488  template <typename Bitfield>
489  void setSubclassData(typename Bitfield::Type Value) {
490  Instruction::setSubclassData<Bitfield>(Value);
491  }
492 
493  /// The synchronization scope ID of this fence instruction. Not quite enough
494  /// room in SubClassData for everything, so synchronization scope ID gets its
495  /// own field.
496  SyncScope::ID SSID;
497 };
498 
499 //===----------------------------------------------------------------------===//
500 // AtomicCmpXchgInst Class
501 //===----------------------------------------------------------------------===//
502 
503 /// An instruction that atomically checks whether a
504 /// specified value is in a memory location, and, if it is, stores a new value
505 /// there. The value returned by this instruction is a pair containing the
506 /// original value as first element, and an i1 indicating success (true) or
507 /// failure (false) as second element.
508 ///
510  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
511  AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
512  SyncScope::ID SSID);
513 
514  template <unsigned Offset>
515  using AtomicOrderingBitfieldElement =
516  typename Bitfield::Element<AtomicOrdering, Offset, 3,
518 
519 protected:
520  // Note: Instruction needs to be a friend here to call cloneImpl.
521  friend class Instruction;
522 
523  AtomicCmpXchgInst *cloneImpl() const;
524 
525 public:
526  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
527  AtomicOrdering SuccessOrdering,
528  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
529  Instruction *InsertBefore = nullptr);
530  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
531  AtomicOrdering SuccessOrdering,
532  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
533  BasicBlock *InsertAtEnd);
534 
535  // allocate space for exactly three operands
536  void *operator new(size_t S) { return User::operator new(S, 3); }
537  void operator delete(void *Ptr) { User::operator delete(Ptr); }
538 
541  using SuccessOrderingField =
543  using FailureOrderingField =
545  using AlignmentField =
547  static_assert(
550  "Bitfields must be contiguous");
551 
552  /// Return the alignment of the memory that is being allocated by the
553  /// instruction.
554  Align getAlign() const {
555  return Align(1ULL << getSubclassData<AlignmentField>());
556  }
557 
559  setSubclassData<AlignmentField>(Log2(Align));
560  }
561 
562  /// Return true if this is a cmpxchg from a volatile memory
563  /// location.
564  ///
565  bool isVolatile() const { return getSubclassData<VolatileField>(); }
566 
567  /// Specify whether this is a volatile cmpxchg.
568  ///
569  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
570 
571  /// Return true if this cmpxchg may spuriously fail.
572  bool isWeak() const { return getSubclassData<WeakField>(); }
573 
574  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
575 
576  /// Transparently provide more efficient getOperand methods.
578 
579  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
580  return Ordering != AtomicOrdering::NotAtomic &&
581  Ordering != AtomicOrdering::Unordered;
582  }
583 
584  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
585  return Ordering != AtomicOrdering::NotAtomic &&
586  Ordering != AtomicOrdering::Unordered &&
587  Ordering != AtomicOrdering::AcquireRelease &&
588  Ordering != AtomicOrdering::Release;
589  }
590 
591  /// Returns the success ordering constraint of this cmpxchg instruction.
593  return getSubclassData<SuccessOrderingField>();
594  }
595 
596  /// Sets the success ordering constraint of this cmpxchg instruction.
598  assert(isValidSuccessOrdering(Ordering) &&
599  "invalid CmpXchg success ordering");
600  setSubclassData<SuccessOrderingField>(Ordering);
601  }
602 
603  /// Returns the failure ordering constraint of this cmpxchg instruction.
605  return getSubclassData<FailureOrderingField>();
606  }
607 
608  /// Sets the failure ordering constraint of this cmpxchg instruction.
610  assert(isValidFailureOrdering(Ordering) &&
611  "invalid CmpXchg failure ordering");
612  setSubclassData<FailureOrderingField>(Ordering);
613  }
614 
615  /// Returns a single ordering which is at least as strong as both the
616  /// success and failure orderings for this cmpxchg.
625  }
626  return getSuccessOrdering();
627  }
628 
629  /// Returns the synchronization scope ID of this cmpxchg instruction.
631  return SSID;
632  }
633 
634  /// Sets the synchronization scope ID of this cmpxchg instruction.
636  this->SSID = SSID;
637  }
638 
640  const Value *getPointerOperand() const { return getOperand(0); }
641  static unsigned getPointerOperandIndex() { return 0U; }
642 
644  const Value *getCompareOperand() const { return getOperand(1); }
645 
647  const Value *getNewValOperand() const { return getOperand(2); }
648 
649  /// Returns the address space of the pointer operand.
650  unsigned getPointerAddressSpace() const {
652  }
653 
654  /// Returns the strongest permitted ordering on failure, given the
655  /// desired ordering on success.
656  ///
657  /// If the comparison in a cmpxchg operation fails, there is no atomic store
658  /// so release semantics cannot be provided. So this function drops explicit
659  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
660  /// operation would remain SequentiallyConsistent.
661  static AtomicOrdering
663  switch (SuccessOrdering) {
664  default:
665  llvm_unreachable("invalid cmpxchg success ordering");
674  }
675  }
676 
677  // Methods for support type inquiry through isa, cast, and dyn_cast:
678  static bool classof(const Instruction *I) {
679  return I->getOpcode() == Instruction::AtomicCmpXchg;
680  }
681  static bool classof(const Value *V) {
682  return isa<Instruction>(V) && classof(cast<Instruction>(V));
683  }
684 
685 private:
686  // Shadow Instruction::setInstructionSubclassData with a private forwarding
687  // method so that subclasses cannot accidentally use it.
688  template <typename Bitfield>
689  void setSubclassData(typename Bitfield::Type Value) {
690  Instruction::setSubclassData<Bitfield>(Value);
691  }
692 
693  /// The synchronization scope ID of this cmpxchg instruction. Not quite
694  /// enough room in SubClassData for everything, so synchronization scope ID
695  /// gets its own field.
696  SyncScope::ID SSID;
697 };
698 
699 template <>
701  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
702 };
703 
705 
706 //===----------------------------------------------------------------------===//
707 // AtomicRMWInst Class
708 //===----------------------------------------------------------------------===//
709 
710 /// an instruction that atomically reads a memory location,
711 /// combines it with another value, and then stores the result back. Returns
712 /// the old value.
713 ///
714 class AtomicRMWInst : public Instruction {
715 protected:
716  // Note: Instruction needs to be a friend here to call cloneImpl.
717  friend class Instruction;
718 
719  AtomicRMWInst *cloneImpl() const;
720 
721 public:
722  /// This enumeration lists the possible modifications atomicrmw can make. In
723  /// the descriptions, 'p' is the pointer to the instruction's memory location,
724  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
725  /// instruction. These instructions always return 'old'.
726  enum BinOp : unsigned {
727  /// *p = v
729  /// *p = old + v
731  /// *p = old - v
733  /// *p = old & v
735  /// *p = ~(old & v)
737  /// *p = old | v
738  Or,
739  /// *p = old ^ v
741  /// *p = old >signed v ? old : v
743  /// *p = old <signed v ? old : v
745  /// *p = old >unsigned v ? old : v
747  /// *p = old <unsigned v ? old : v
749 
750  /// *p = old + v
752 
753  /// *p = old - v
755 
756  FIRST_BINOP = Xchg,
757  LAST_BINOP = FSub,
758  BAD_BINOP
759  };
760 
761 private:
762  template <unsigned Offset>
763  using AtomicOrderingBitfieldElement =
766 
767  template <unsigned Offset>
768  using BinOpBitfieldElement =
770 
771 public:
772  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
773  AtomicOrdering Ordering, SyncScope::ID SSID,
774  Instruction *InsertBefore = nullptr);
775  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
776  AtomicOrdering Ordering, SyncScope::ID SSID,
777  BasicBlock *InsertAtEnd);
778 
779  // allocate space for exactly two operands
780  void *operator new(size_t S) { return User::operator new(S, 2); }
781  void operator delete(void *Ptr) { User::operator delete(Ptr); }
782 
784  using AtomicOrderingField =
786  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
790  "Bitfields must be contiguous");
791 
792  BinOp getOperation() const { return getSubclassData<OperationField>(); }
793 
794  static StringRef getOperationName(BinOp Op);
795 
796  static bool isFPOperation(BinOp Op) {
797  switch (Op) {
798  case AtomicRMWInst::FAdd:
799  case AtomicRMWInst::FSub:
800  return true;
801  default:
802  return false;
803  }
804  }
805 
807  setSubclassData<OperationField>(Operation);
808  }
809 
810  /// Return the alignment of the memory that is being allocated by the
811  /// instruction.
812  Align getAlign() const {
813  return Align(1ULL << getSubclassData<AlignmentField>());
814  }
815 
817  setSubclassData<AlignmentField>(Log2(Align));
818  }
819 
820  /// Return true if this is a RMW on a volatile memory location.
821  ///
822  bool isVolatile() const { return getSubclassData<VolatileField>(); }
823 
824  /// Specify whether this is a volatile RMW or not.
825  ///
826  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
827 
828  /// Transparently provide more efficient getOperand methods.
830 
831  /// Returns the ordering constraint of this rmw instruction.
833  return getSubclassData<AtomicOrderingField>();
834  }
835 
836  /// Sets the ordering constraint of this rmw instruction.
837  void setOrdering(AtomicOrdering Ordering) {
838  assert(Ordering != AtomicOrdering::NotAtomic &&
839  "atomicrmw instructions can only be atomic.");
840  setSubclassData<AtomicOrderingField>(Ordering);
841  }
842 
843  /// Returns the synchronization scope ID of this rmw instruction.
845  return SSID;
846  }
847 
848  /// Sets the synchronization scope ID of this rmw instruction.
850  this->SSID = SSID;
851  }
852 
853  Value *getPointerOperand() { return getOperand(0); }
854  const Value *getPointerOperand() const { return getOperand(0); }
855  static unsigned getPointerOperandIndex() { return 0U; }
856 
857  Value *getValOperand() { return getOperand(1); }
858  const Value *getValOperand() const { return getOperand(1); }
859 
860  /// Returns the address space of the pointer operand.
861  unsigned getPointerAddressSpace() const {
863  }
864 
866  return isFPOperation(getOperation());
867  }
868 
869  // Methods for support type inquiry through isa, cast, and dyn_cast:
870  static bool classof(const Instruction *I) {
871  return I->getOpcode() == Instruction::AtomicRMW;
872  }
873  static bool classof(const Value *V) {
874  return isa<Instruction>(V) && classof(cast<Instruction>(V));
875  }
876 
877 private:
878  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
879  AtomicOrdering Ordering, SyncScope::ID SSID);
880 
881  // Shadow Instruction::setInstructionSubclassData with a private forwarding
882  // method so that subclasses cannot accidentally use it.
883  template <typename Bitfield>
884  void setSubclassData(typename Bitfield::Type Value) {
885  Instruction::setSubclassData<Bitfield>(Value);
886  }
887 
888  /// The synchronization scope ID of this rmw instruction. Not quite enough
889  /// room in SubClassData for everything, so synchronization scope ID gets its
890  /// own field.
891  SyncScope::ID SSID;
892 };
893 
894 template <>
896  : public FixedNumOperandTraits<AtomicRMWInst,2> {
897 };
898 
900 
901 //===----------------------------------------------------------------------===//
902 // GetElementPtrInst Class
903 //===----------------------------------------------------------------------===//
904 
905 // checkGEPType - Simple wrapper function to give a better assertion failure
906 // message on bad indexes for a gep instruction.
907 //
909  assert(Ty && "Invalid GetElementPtrInst indices for type!");
910  return Ty;
911 }
912 
913 /// an instruction for type-safe pointer arithmetic to
914 /// access elements of arrays and structs
915 ///
917  Type *SourceElementType;
918  Type *ResultElementType;
919 
921 
922  /// Constructors - Create a getelementptr instruction with a base pointer an
923  /// list of indices. The first ctor can optionally insert before an existing
924  /// instruction, the second appends the new instruction to the specified
925  /// BasicBlock.
926  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
927  ArrayRef<Value *> IdxList, unsigned Values,
928  const Twine &NameStr, Instruction *InsertBefore);
929  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
930  ArrayRef<Value *> IdxList, unsigned Values,
931  const Twine &NameStr, BasicBlock *InsertAtEnd);
932 
933  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
934 
935 protected:
936  // Note: Instruction needs to be a friend here to call cloneImpl.
937  friend class Instruction;
938 
939  GetElementPtrInst *cloneImpl() const;
940 
941 public:
942  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
943  ArrayRef<Value *> IdxList,
944  const Twine &NameStr = "",
945  Instruction *InsertBefore = nullptr) {
946  unsigned Values = 1 + unsigned(IdxList.size());
947  assert(PointeeType && "Must specify element type");
948  assert(cast<PointerType>(Ptr->getType()->getScalarType())
949  ->isOpaqueOrPointeeTypeMatches(PointeeType));
950  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
951  NameStr, InsertBefore);
952  }
953 
954  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955  ArrayRef<Value *> IdxList,
956  const Twine &NameStr,
957  BasicBlock *InsertAtEnd) {
958  unsigned Values = 1 + unsigned(IdxList.size());
959  assert(PointeeType && "Must specify element type");
960  assert(cast<PointerType>(Ptr->getType()->getScalarType())
961  ->isOpaqueOrPointeeTypeMatches(PointeeType));
962  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963  NameStr, InsertAtEnd);
964  }
965 
966  /// Create an "inbounds" getelementptr. See the documentation for the
967  /// "inbounds" flag in LangRef.html for details.
968  static GetElementPtrInst *
969  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
970  const Twine &NameStr = "",
971  Instruction *InsertBefore = nullptr) {
973  Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
974  GEP->setIsInBounds(true);
975  return GEP;
976  }
977 
978  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
979  ArrayRef<Value *> IdxList,
980  const Twine &NameStr,
981  BasicBlock *InsertAtEnd) {
983  Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
984  GEP->setIsInBounds(true);
985  return GEP;
986  }
987 
988  /// Transparently provide more efficient getOperand methods.
990 
991  Type *getSourceElementType() const { return SourceElementType; }
992 
993  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
994  void setResultElementType(Type *Ty) { ResultElementType = Ty; }
995 
997  assert(cast<PointerType>(getType()->getScalarType())
998  ->isOpaqueOrPointeeTypeMatches(ResultElementType));
999  return ResultElementType;
1000  }
1001 
  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  // Overloads taking constant and raw-integer index lists.
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1025 
1026  inline op_iterator idx_begin() { return op_begin()+1; }
1027  inline const_op_iterator idx_begin() const { return op_begin()+1; }
1028  inline op_iterator idx_end() { return op_end(); }
1029  inline const_op_iterator idx_end() const { return op_end(); }
1030 
1032  return make_range(idx_begin(), idx_end());
1033  }
1034 
1036  return make_range(idx_begin(), idx_end());
1037  }
1038 
1040  return getOperand(0);
1041  }
1042  const Value *getPointerOperand() const {
1043  return getOperand(0);
1044  }
1045  static unsigned getPointerOperandIndex() {
1046  return 0U; // get index for modifying correct operand.
1047  }
1048 
1049  /// Method to return the pointer operand as a
1050  /// PointerType.
1052  return getPointerOperand()->getType();
1053  }
1054 
1055  /// Returns the address space of the pointer operand.
1056  unsigned getPointerAddressSpace() const {
1058  }
1059 
1060  /// Returns the pointer type returned by the GEP
1061  /// instruction, which may be a vector of pointers.
1062  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1063  ArrayRef<Value *> IdxList) {
1064  PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1065  unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1066  Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1067  Type *PtrTy = OrigPtrTy->isOpaque()
1068  ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1069  : PointerType::get(ResultElemTy, AddrSpace);
1070  // Vector GEP
1071  if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1072  ElementCount EltCount = PtrVTy->getElementCount();
1073  return VectorType::get(PtrTy, EltCount);
1074  }
1075  for (Value *Index : IdxList)
1076  if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1077  ElementCount EltCount = IndexVTy->getElementCount();
1078  return VectorType::get(PtrTy, EltCount);
1079  }
1080  // Scalar GEP
1081  return PtrTy;
1082  }
1083 
  unsigned getNumIndices() const {  // Note: always non-negative
    // Total operands minus the base pointer.
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  /// Split the GEP offset into a per-Value map of variable multipliers plus
  /// a constant remainder, all at the given bit width.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
1128 };
1129 
1130 template <>
1132  public VariadicOperandTraits<GetElementPtrInst, 1> {
1133 };
1134 
// Inline constructor (insert-before form). The operand array is co-allocated
// immediately before this object by the placement-new in Create(), so
// op_end(this) - Values locates its first slot.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1148 
// Inline constructor (append-to-block form). See the insert-before overload
// above for the operand co-allocation scheme.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1162 
1163 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1164 
1165 //===----------------------------------------------------------------------===//
1166 // ICmpInst Class
1167 //===----------------------------------------------------------------------===//
1168 
1169 /// This instruction compares its operands according to the predicate given
1170 /// to the constructor. It only operates on integers or pointers. The operands
1171 /// must be identical types.
1172 /// Represent an integer comparison operator.
1173 class ICmpInst: public CmpInst {
1174  void AssertOK() {
1175  assert(isIntPredicate() &&
1176  "Invalid ICmp predicate value");
1177  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1178  "Both operands to ICmp instruction are not of the same type!");
1179  // Check that the operands are the right type
1180  assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1181  getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1182  "Invalid operand types for ICmp instruction");
1183  }
1184 
1185 protected:
1186  // Note: Instruction needs to be a friend here to call cloneImpl.
1187  friend class Instruction;
1188 
1189  /// Clone an identical ICmpInst
1190  ICmpInst *cloneImpl() const;
1191 
1192 public:
1193  /// Constructor with insert-before-instruction semantics.
1195  Instruction *InsertBefore, ///< Where to insert
1196  Predicate pred, ///< The predicate to use for the comparison
1197  Value *LHS, ///< The left-hand-side of the expression
1198  Value *RHS, ///< The right-hand-side of the expression
1199  const Twine &NameStr = "" ///< Name of the instruction
1200  ) : CmpInst(makeCmpResultType(LHS->getType()),
1201  Instruction::ICmp, pred, LHS, RHS, NameStr,
1202  InsertBefore) {
1203 #ifndef NDEBUG
1204  AssertOK();
1205 #endif
1206  }
1207 
1208  /// Constructor with insert-at-end semantics.
1210  BasicBlock &InsertAtEnd, ///< Block to insert into.
1211  Predicate pred, ///< The predicate to use for the comparison
1212  Value *LHS, ///< The left-hand-side of the expression
1213  Value *RHS, ///< The right-hand-side of the expression
1214  const Twine &NameStr = "" ///< Name of the instruction
1215  ) : CmpInst(makeCmpResultType(LHS->getType()),
1216  Instruction::ICmp, pred, LHS, RHS, NameStr,
1217  &InsertAtEnd) {
1218 #ifndef NDEBUG
1219  AssertOK();
1220 #endif
1221  }
1222 
1223  /// Constructor with no-insertion semantics
1225  Predicate pred, ///< The predicate to use for the comparison
1226  Value *LHS, ///< The left-hand-side of the expression
1227  Value *RHS, ///< The right-hand-side of the expression
1228  const Twine &NameStr = "" ///< Name of the instruction
1229  ) : CmpInst(makeCmpResultType(LHS->getType()),
1230  Instruction::ICmp, pred, LHS, RHS, NameStr) {
1231 #ifndef NDEBUG
1232  AssertOK();
1233 #endif
1234  }
1235 
1236  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1237  /// @returns the predicate that would be the result if the operand were
1238  /// regarded as signed.
1239  /// Return the signed version of the predicate
1241  return getSignedPredicate(getPredicate());
1242  }
1243 
1244  /// This is a static version that you can use without an instruction.
1245  /// Return the signed version of the predicate.
1246  static Predicate getSignedPredicate(Predicate pred);
1247 
1248  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1249  /// @returns the predicate that would be the result if the operand were
1250  /// regarded as unsigned.
1251  /// Return the unsigned version of the predicate
1253  return getUnsignedPredicate(getPredicate());
1254  }
1255 
1256  /// This is a static version that you can use without an instruction.
1257  /// Return the unsigned version of the predicate.
1258  static Predicate getUnsignedPredicate(Predicate pred);
1259 
1260  /// Return true if this predicate is either EQ or NE. This also
1261  /// tests for commutativity.
1262  static bool isEquality(Predicate P) {
1263  return P == ICMP_EQ || P == ICMP_NE;
1264  }
1265 
1266  /// Return true if this predicate is either EQ or NE. This also
1267  /// tests for commutativity.
1268  bool isEquality() const {
1269  return isEquality(getPredicate());
1270  }
1271 
1272  /// @returns true if the predicate of this ICmpInst is commutative
1273  /// Determine if this relation is commutative.
1274  bool isCommutative() const { return isEquality(); }
1275 
1276  /// Return true if the predicate is relational (not EQ or NE).
1277  ///
1278  bool isRelational() const {
1279  return !isEquality();
1280  }
1281 
1282  /// Return true if the predicate is relational (not EQ or NE).
1283  ///
1284  static bool isRelational(Predicate P) {
1285  return !isEquality(P);
1286  }
1287 
1288  /// Return true if the predicate is SGT or UGT.
1289  ///
1290  static bool isGT(Predicate P) {
1291  return P == ICMP_SGT || P == ICMP_UGT;
1292  }
1293 
1294  /// Return true if the predicate is SLT or ULT.
1295  ///
1296  static bool isLT(Predicate P) {
1297  return P == ICMP_SLT || P == ICMP_ULT;
1298  }
1299 
1300  /// Return true if the predicate is SGE or UGE.
1301  ///
1302  static bool isGE(Predicate P) {
1303  return P == ICMP_SGE || P == ICMP_UGE;
1304  }
1305 
1306  /// Return true if the predicate is SLE or ULE.
1307  ///
1308  static bool isLE(Predicate P) {
1309  return P == ICMP_SLE || P == ICMP_ULE;
1310  }
1311 
1312  /// Returns the sequence of all ICmp predicates.
1313  ///
1314  static auto predicates() { return ICmpPredicates(); }
1315 
1316  /// Exchange the two operands to this instruction in such a way that it does
1317  /// not modify the semantics of the instruction. The predicate value may be
1318  /// changed to retain the same result if the predicate is order dependent
1319  /// (e.g. ult).
1320  /// Swap operands and adjust predicate.
1321  void swapOperands() {
1322  setPredicate(getSwappedPredicate());
1323  Op<0>().swap(Op<1>());
1324  }
1325 
1326  /// Return result of `LHS Pred RHS` comparison.
1327  static bool compare(const APInt &LHS, const APInt &RHS,
1328  ICmpInst::Predicate Pred);
1329 
1330  // Methods for support type inquiry through isa, cast, and dyn_cast:
1331  static bool classof(const Instruction *I) {
1332  return I->getOpcode() == Instruction::ICmp;
1333  }
1334  static bool classof(const Value *V) {
1335  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1336  }
1337 };
1338 
1339 //===----------------------------------------------------------------------===//
1340 // FCmpInst Class
1341 //===----------------------------------------------------------------------===//
1342 
1343 /// This instruction compares its operands according to the predicate given
1344 /// to the constructor. It only operates on floating point values or packed
1345 /// vectors of floating point values. The operands must be identical types.
1346 /// Represents a floating point comparison operator.
1347 class FCmpInst: public CmpInst {
1348  void AssertOK() {
1349  assert(isFPPredicate() && "Invalid FCmp predicate value");
1350  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1351  "Both operands to FCmp instruction are not of the same type!");
1352  // Check that the operands are the right type
1353  assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1354  "Invalid operand types for FCmp instruction");
1355  }
1356 
1357 protected:
1358  // Note: Instruction needs to be a friend here to call cloneImpl.
1359  friend class Instruction;
1360 
1361  /// Clone an identical FCmpInst
1362  FCmpInst *cloneImpl() const;
1363 
1364 public:
1365  /// Constructor with insert-before-instruction semantics.
1367  Instruction *InsertBefore, ///< Where to insert
1368  Predicate pred, ///< The predicate to use for the comparison
1369  Value *LHS, ///< The left-hand-side of the expression
1370  Value *RHS, ///< The right-hand-side of the expression
1371  const Twine &NameStr = "" ///< Name of the instruction
1373  Instruction::FCmp, pred, LHS, RHS, NameStr,
1374  InsertBefore) {
1375  AssertOK();
1376  }
1377 
1378  /// Constructor with insert-at-end semantics.
1380  BasicBlock &InsertAtEnd, ///< Block to insert into.
1381  Predicate pred, ///< The predicate to use for the comparison
1382  Value *LHS, ///< The left-hand-side of the expression
1383  Value *RHS, ///< The right-hand-side of the expression
1384  const Twine &NameStr = "" ///< Name of the instruction
1386  Instruction::FCmp, pred, LHS, RHS, NameStr,
1387  &InsertAtEnd) {
1388  AssertOK();
1389  }
1390 
1391  /// Constructor with no-insertion semantics
1393  Predicate Pred, ///< The predicate to use for the comparison
1394  Value *LHS, ///< The left-hand-side of the expression
1395  Value *RHS, ///< The right-hand-side of the expression
1396  const Twine &NameStr = "", ///< Name of the instruction
1397  Instruction *FlagsSource = nullptr
1398  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1399  RHS, NameStr, nullptr, FlagsSource) {
1400  AssertOK();
1401  }
1402 
1403  /// @returns true if the predicate of this instruction is EQ or NE.
1404  /// Determine if this is an equality predicate.
1405  static bool isEquality(Predicate Pred) {
1406  return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1407  Pred == FCMP_UNE;
1408  }
1409 
1410  /// @returns true if the predicate of this instruction is EQ or NE.
1411  /// Determine if this is an equality predicate.
1412  bool isEquality() const { return isEquality(getPredicate()); }
1413 
1414  /// @returns true if the predicate of this instruction is commutative.
1415  /// Determine if this is a commutative predicate.
1416  bool isCommutative() const {
1417  return isEquality() ||
1418  getPredicate() == FCMP_FALSE ||
1419  getPredicate() == FCMP_TRUE ||
1420  getPredicate() == FCMP_ORD ||
1421  getPredicate() == FCMP_UNO;
1422  }
1423 
1424  /// @returns true if the predicate is relational (not EQ or NE).
1425  /// Determine if this a relational predicate.
1426  bool isRelational() const { return !isEquality(); }
1427 
1428  /// Exchange the two operands to this instruction in such a way that it does
1429  /// not modify the semantics of the instruction. The predicate value may be
1430  /// changed to retain the same result if the predicate is order dependent
1431  /// (e.g. ult).
1432  /// Swap operands and adjust predicate.
1433  void swapOperands() {
1435  Op<0>().swap(Op<1>());
1436  }
1437 
1438  /// Returns the sequence of all FCmp predicates.
1439  ///
1440  static auto predicates() { return FCmpPredicates(); }
1441 
1442  /// Return result of `LHS Pred RHS` comparison.
1443  static bool compare(const APFloat &LHS, const APFloat &RHS,
1444  FCmpInst::Predicate Pred);
1445 
1446  /// Methods for support type inquiry through isa, cast, and dyn_cast:
1447  static bool classof(const Instruction *I) {
1448  return I->getOpcode() == Instruction::FCmp;
1449  }
1450  static bool classof(const Value *V) {
1451  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1452  }
1453 };
1454 
1455 //===----------------------------------------------------------------------===//
1456 /// This class represents a function call, abstracting a target
1457 /// machine's calling convention. This class uses low bit of the SubClassData
1458 /// field to indicate whether or not this is a tail call. The rest of the bits
1459 /// hold the calling convention of the call.
1460 ///
1461 class CallInst : public CallBase {
1462  CallInst(const CallInst &CI);
1463 
1464  /// Construct a CallInst given a range of arguments.
1465  /// Construct a CallInst from a range of arguments
1466  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1467  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1468  Instruction *InsertBefore);
1469 
1470  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1471  const Twine &NameStr, Instruction *InsertBefore)
1472  : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1473 
1474  /// Construct a CallInst given a range of arguments.
1475  /// Construct a CallInst from a range of arguments
1476  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1477  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1478  BasicBlock *InsertAtEnd);
1479 
1480  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1481  Instruction *InsertBefore);
1482 
1483  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1484  BasicBlock *InsertAtEnd);
1485 
1486  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1487  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1488  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1489 
1490  /// Compute the number of operands to allocate.
1491  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1492  // We need one operand for the called function, plus the input operand
1493  // counts provided.
1494  return 1 + NumArgs + NumBundleInputs;
1495  }
1496 
1497 protected:
1498  // Note: Instruction needs to be a friend here to call cloneImpl.
1499  friend class Instruction;
1500 
1501  CallInst *cloneImpl() const;
1502 
1503 public:
1504  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1505  Instruction *InsertBefore = nullptr) {
1506  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1507  }
1508 
1510  const Twine &NameStr,
1511  Instruction *InsertBefore = nullptr) {
1512  return new (ComputeNumOperands(Args.size()))
1513  CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1514  }
1515 
1517  ArrayRef<OperandBundleDef> Bundles = None,
1518  const Twine &NameStr = "",
1519  Instruction *InsertBefore = nullptr) {
1520  const int NumOperands =
1521  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1522  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1523 
1524  return new (NumOperands, DescriptorBytes)
1525  CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1526  }
1527 
1528  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1529  BasicBlock *InsertAtEnd) {
1530  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1531  }
1532 
1534  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1535  return new (ComputeNumOperands(Args.size()))
1536  CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1537  }
1538 
1541  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1542  const int NumOperands =
1543  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1544  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1545 
1546  return new (NumOperands, DescriptorBytes)
1547  CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1548  }
1549 
1550  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1551  Instruction *InsertBefore = nullptr) {
1552  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1553  InsertBefore);
1554  }
1555 
1557  ArrayRef<OperandBundleDef> Bundles = None,
1558  const Twine &NameStr = "",
1559  Instruction *InsertBefore = nullptr) {
1560  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1561  NameStr, InsertBefore);
1562  }
1563 
1565  const Twine &NameStr,
1566  Instruction *InsertBefore = nullptr) {
1567  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1568  InsertBefore);
1569  }
1570 
1571  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1572  BasicBlock *InsertAtEnd) {
1573  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1574  InsertAtEnd);
1575  }
1576 
1578  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1579  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1580  InsertAtEnd);
1581  }
1582 
1585  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1586  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1587  NameStr, InsertAtEnd);
1588  }
1589 
1590  /// Create a clone of \p CI with a different set of operand bundles and
1591  /// insert it before \p InsertPt.
1592  ///
1593  /// The returned call instruction is identical \p CI in every way except that
1594  /// the operand bundles for the new instruction are set to the operand bundles
1595  /// in \p Bundles.
1596  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1597  Instruction *InsertPt = nullptr);
1598 
1599  /// Generate the IR for a call to malloc:
1600  /// 1. Compute the malloc call's argument as the specified type's size,
1601  /// possibly multiplied by the array size if the array size is not
1602  /// constant 1.
1603  /// 2. Call malloc with that argument.
1604  /// 3. Bitcast the result of the malloc call to the specified type.
1605  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1606  Type *AllocTy, Value *AllocSize,
1607  Value *ArraySize = nullptr,
1608  Function *MallocF = nullptr,
1609  const Twine &Name = "");
1610  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1611  Type *AllocTy, Value *AllocSize,
1612  Value *ArraySize = nullptr,
1613  Function *MallocF = nullptr,
1614  const Twine &Name = "");
1615  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1616  Type *AllocTy, Value *AllocSize,
1617  Value *ArraySize = nullptr,
1618  ArrayRef<OperandBundleDef> Bundles = None,
1619  Function *MallocF = nullptr,
1620  const Twine &Name = "");
1621  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1622  Type *AllocTy, Value *AllocSize,
1623  Value *ArraySize = nullptr,
1624  ArrayRef<OperandBundleDef> Bundles = None,
1625  Function *MallocF = nullptr,
1626  const Twine &Name = "");
1627  /// Generate the IR for a call to the builtin free function.
1628  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1629  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1632  Instruction *InsertBefore);
1635  BasicBlock *InsertAtEnd);
1636 
1637  // Note that 'musttail' implies 'tail'.
1638  enum TailCallKind : unsigned {
1644  };
1645 
1647  static_assert(
1648  Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1649  "Bitfields must be contiguous");
1650 
1652  return getSubclassData<TailCallKindField>();
1653  }
1654 
1655  bool isTailCall() const {
1657  return Kind == TCK_Tail || Kind == TCK_MustTail;
1658  }
1659 
1660  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1661 
1662  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1663 
1665  setSubclassData<TailCallKindField>(TCK);
1666  }
1667 
1668  void setTailCall(bool IsTc = true) {
1669  setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1670  }
1671 
1672  /// Return true if the call can return twice
1673  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1674  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1675 
1676  // Methods for support type inquiry through isa, cast, and dyn_cast:
1677  static bool classof(const Instruction *I) {
1678  return I->getOpcode() == Instruction::Call;
1679  }
1680  static bool classof(const Value *V) {
1681  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1682  }
1683 
1684  /// Updates profile metadata by scaling it by \p S / \p T.
1686 
1687 private:
1688  // Shadow Instruction::setInstructionSubclassData with a private forwarding
1689  // method so that subclasses cannot accidentally use it.
1690  template <typename Bitfield>
1691  void setSubclassData(typename Bitfield::Type Value) {
1692  Instruction::setSubclassData<Bitfield>(Value);
1693  }
1694 };
1695 
// Inline constructor (append-to-block form). The operand array (callee +
// arguments + bundle inputs) is co-allocated immediately before this object,
// so op_end(this) - N locates its first slot.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1706 
// Inline constructor (insert-before form). Operand co-allocation is the same
// as in the append-to-block constructor: callee + args + bundle inputs are
// placed immediately before this object.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1717 
1718 //===----------------------------------------------------------------------===//
1719 // SelectInst Class
1720 //===----------------------------------------------------------------------===//
1721 
1722 /// This class represents the LLVM 'select' instruction.
1723 ///
1724 class SelectInst : public Instruction {
1725  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1726  Instruction *InsertBefore)
1728  &Op<0>(), 3, InsertBefore) {
1729  init(C, S1, S2);
1730  setName(NameStr);
1731  }
1732 
1733  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1734  BasicBlock *InsertAtEnd)
1736  &Op<0>(), 3, InsertAtEnd) {
1737  init(C, S1, S2);
1738  setName(NameStr);
1739  }
1740 
1741  void init(Value *C, Value *S1, Value *S2) {
1742  assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1743  Op<0>() = C;
1744  Op<1>() = S1;
1745  Op<2>() = S2;
1746  }
1747 
1748 protected:
1749  // Note: Instruction needs to be a friend here to call cloneImpl.
1750  friend class Instruction;
1751 
1752  SelectInst *cloneImpl() const;
1753 
1754 public:
1755  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1756  const Twine &NameStr = "",
1757  Instruction *InsertBefore = nullptr,
1758  Instruction *MDFrom = nullptr) {
1759  SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1760  if (MDFrom)
1761  Sel->copyMetadata(*MDFrom);
1762  return Sel;
1763  }
1764 
1765  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1766  const Twine &NameStr,
1767  BasicBlock *InsertAtEnd) {
1768  return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1769  }
1770 
1771  const Value *getCondition() const { return Op<0>(); }
1772  const Value *getTrueValue() const { return Op<1>(); }
1773  const Value *getFalseValue() const { return Op<2>(); }
1774  Value *getCondition() { return Op<0>(); }
1775  Value *getTrueValue() { return Op<1>(); }
1776  Value *getFalseValue() { return Op<2>(); }
1777 
1778  void setCondition(Value *V) { Op<0>() = V; }
1779  void setTrueValue(Value *V) { Op<1>() = V; }
1780  void setFalseValue(Value *V) { Op<2>() = V; }
1781 
1782  /// Swap the true and false values of the select instruction.
1783  /// This doesn't swap prof metadata.
1784  void swapValues() { Op<1>().swap(Op<2>()); }
1785 
1786  /// Return a string if the specified operands are invalid
1787  /// for a select operation, otherwise return null.
1788  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1789 
1790  /// Transparently provide more efficient getOperand methods.
1792 
1794  return static_cast<OtherOps>(Instruction::getOpcode());
1795  }
1796 
1797  // Methods for support type inquiry through isa, cast, and dyn_cast:
1798  static bool classof(const Instruction *I) {
1799  return I->getOpcode() == Instruction::Select;
1800  }
1801  static bool classof(const Value *V) {
1802  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1803  }
1804 };
1805 
// SelectInst always has exactly three operands: condition, true value,
// false value.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1809 
1811 
1812 //===----------------------------------------------------------------------===//
1813 // VAArgInst Class
1814 //===----------------------------------------------------------------------===//
1815 
/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The single operand is the va_list pointer being advanced.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1851 
1852 //===----------------------------------------------------------------------===//
1853 // ExtractElementInst Class
1854 //===----------------------------------------------------------------------===//
1855 
1856 /// This instruction extracts a single (scalar)
1857 /// element from a VectorType value
1858 ///
1860  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1861  Instruction *InsertBefore = nullptr);
1862  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1863  BasicBlock *InsertAtEnd);
1864 
1865 protected:
1866  // Note: Instruction needs to be a friend here to call cloneImpl.
1867  friend class Instruction;
1868 
1869  ExtractElementInst *cloneImpl() const;
1870 
1871 public:
1872  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1873  const Twine &NameStr = "",
1874  Instruction *InsertBefore = nullptr) {
1875  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1876  }
1877 
1878  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1879  const Twine &NameStr,
1880  BasicBlock *InsertAtEnd) {
1881  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1882  }
1883 
1884  /// Return true if an extractelement instruction can be
1885  /// formed with the specified operands.
1886  static bool isValidOperands(const Value *Vec, const Value *Idx);
1887 
1888  Value *getVectorOperand() { return Op<0>(); }
1889  Value *getIndexOperand() { return Op<1>(); }
1890  const Value *getVectorOperand() const { return Op<0>(); }
1891  const Value *getIndexOperand() const { return Op<1>(); }
1892 
1894  return cast<VectorType>(getVectorOperand()->getType());
1895  }
1896 
1897  /// Transparently provide more efficient getOperand methods.
1899 
1900  // Methods for support type inquiry through isa, cast, and dyn_cast:
1901  static bool classof(const Instruction *I) {
1902  return I->getOpcode() == Instruction::ExtractElement;
1903  }
1904  static bool classof(const Value *V) {
1905  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1906  }
1907 };
1908 
1909 template <>
1911  public FixedNumOperandTraits<ExtractElementInst, 2> {
1912 };
1913 
1915 
1916 //===----------------------------------------------------------------------===//
1917 // InsertElementInst Class
1918 //===----------------------------------------------------------------------===//
1919 
1920 /// This instruction inserts a single (scalar)
1921 /// element into a VectorType value
1922 ///
1924  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1925  const Twine &NameStr = "",
1926  Instruction *InsertBefore = nullptr);
1927  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1928  BasicBlock *InsertAtEnd);
1929 
1930 protected:
1931  // Note: Instruction needs to be a friend here to call cloneImpl.
1932  friend class Instruction;
1933 
1934  InsertElementInst *cloneImpl() const;
1935 
1936 public:
1937  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1938  const Twine &NameStr = "",
1939  Instruction *InsertBefore = nullptr) {
1940  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1941  }
1942 
1943  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1944  const Twine &NameStr,
1945  BasicBlock *InsertAtEnd) {
1946  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1947  }
1948 
1949  /// Return true if an insertelement instruction can be
1950  /// formed with the specified operands.
1951  static bool isValidOperands(const Value *Vec, const Value *NewElt,
1952  const Value *Idx);
1953 
1954  /// Overload to return most specific vector type.
1955  ///
1956  VectorType *getType() const {
1957  return cast<VectorType>(Instruction::getType());
1958  }
1959 
1960  /// Transparently provide more efficient getOperand methods.
1962 
1963  // Methods for support type inquiry through isa, cast, and dyn_cast:
1964  static bool classof(const Instruction *I) {
1965  return I->getOpcode() == Instruction::InsertElement;
1966  }
1967  static bool classof(const Value *V) {
1968  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1969  }
1970 };
1971 
1972 template <>
1974  public FixedNumOperandTraits<InsertElementInst, 3> {
1975 };
1976 
1978 
1979 //===----------------------------------------------------------------------===//
1980 // ShuffleVectorInst Class
1981 //===----------------------------------------------------------------------===//
1982 
1983 constexpr int UndefMaskElem = -1;
1984 
1985 /// This instruction constructs a fixed permutation of two
1986 /// input vectors.
1987 ///
1988 /// For each element of the result vector, the shuffle mask selects an element
1989 /// from one of the input vectors to copy to the result. Non-negative elements
1990 /// in the mask represent an index into the concatenated pair of input vectors.
1991 /// UndefMaskElem (-1) specifies that the result element is undefined.
1992 ///
1993 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
1994 /// requirement may be relaxed in the future.
1996  SmallVector<int, 4> ShuffleMask;
1997  Constant *ShuffleMaskForBitcode;
1998 
1999 protected:
2000  // Note: Instruction needs to be a friend here to call cloneImpl.
2001  friend class Instruction;
2002 
2003  ShuffleVectorInst *cloneImpl() const;
2004 
2005 public:
2006  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2007  Instruction *InsertBefore = nullptr);
2008  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2009  BasicBlock *InsertAtEnd);
2010  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2011  Instruction *InsertBefore = nullptr);
2012  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2013  BasicBlock *InsertAtEnd);
2015  const Twine &NameStr = "",
2016  Instruction *InsertBefor = nullptr);
2018  const Twine &NameStr, BasicBlock *InsertAtEnd);
2020  const Twine &NameStr = "",
2021  Instruction *InsertBefor = nullptr);
2023  const Twine &NameStr, BasicBlock *InsertAtEnd);
2024 
2025  void *operator new(size_t S) { return User::operator new(S, 2); }
2026  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2027 
2028  /// Swap the operands and adjust the mask to preserve the semantics
2029  /// of the instruction.
2030  void commute();
2031 
2032  /// Return true if a shufflevector instruction can be
2033  /// formed with the specified operands.
2034  static bool isValidOperands(const Value *V1, const Value *V2,
2035  const Value *Mask);
2036  static bool isValidOperands(const Value *V1, const Value *V2,
2038 
2039  /// Overload to return most specific vector type.
2040  ///
2041  VectorType *getType() const {
2042  return cast<VectorType>(Instruction::getType());
2043  }
2044 
2045  /// Transparently provide more efficient getOperand methods.
2047 
2048  /// Return the shuffle mask value of this instruction for the given element
2049  /// index. Return UndefMaskElem if the element is undef.
2050  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2051 
2052  /// Convert the input shuffle mask operand to a vector of integers. Undefined
2053  /// elements of the mask are returned as UndefMaskElem.
2054  static void getShuffleMask(const Constant *Mask,
2055  SmallVectorImpl<int> &Result);
2056 
2057  /// Return the mask for this instruction as a vector of integers. Undefined
2058  /// elements of the mask are returned as UndefMaskElem.
2059  void getShuffleMask(SmallVectorImpl<int> &Result) const {
2060  Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2061  }
2062 
2063  /// Return the mask for this instruction, for use in bitcode.
2064  ///
2065  /// TODO: This is temporary until we decide a new bitcode encoding for
2066  /// shufflevector.
2067  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2068 
2069  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2070  Type *ResultTy);
2071 
2072  void setShuffleMask(ArrayRef<int> Mask);
2073 
2074  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2075 
2076  /// Return true if this shuffle returns a vector with a different number of
2077  /// elements than its source vectors.
2078  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2079  /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2080  bool changesLength() const {
2081  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2082  ->getElementCount()
2083  .getKnownMinValue();
2084  unsigned NumMaskElts = ShuffleMask.size();
2085  return NumSourceElts != NumMaskElts;
2086  }
2087 
2088  /// Return true if this shuffle returns a vector with a greater number of
2089  /// elements than its source vectors.
2090  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2091  bool increasesLength() const {
2092  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2093  ->getElementCount()
2094  .getKnownMinValue();
2095  unsigned NumMaskElts = ShuffleMask.size();
2096  return NumSourceElts < NumMaskElts;
2097  }
2098 
2099  /// Return true if this shuffle mask chooses elements from exactly one source
2100  /// vector.
2101  /// Example: <7,5,undef,7>
2102  /// This assumes that vector operands are the same length as the mask.
2103  static bool isSingleSourceMask(ArrayRef<int> Mask);
2104  static bool isSingleSourceMask(const Constant *Mask) {
2105  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2106  SmallVector<int, 16> MaskAsInts;
2107  getShuffleMask(Mask, MaskAsInts);
2108  return isSingleSourceMask(MaskAsInts);
2109  }
2110 
2111  /// Return true if this shuffle chooses elements from exactly one source
2112  /// vector without changing the length of that vector.
2113  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2114  /// TODO: Optionally allow length-changing shuffles.
2115  bool isSingleSource() const {
2116  return !changesLength() && isSingleSourceMask(ShuffleMask);
2117  }
2118 
2119  /// Return true if this shuffle mask chooses elements from exactly one source
2120  /// vector without lane crossings. A shuffle using this mask is not
2121  /// necessarily a no-op because it may change the number of elements from its
2122  /// input vectors or it may provide demanded bits knowledge via undef lanes.
2123  /// Example: <undef,undef,2,3>
2124  static bool isIdentityMask(ArrayRef<int> Mask);
2125  static bool isIdentityMask(const Constant *Mask) {
2126  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2127 
2128  // Not possible to express a shuffle mask for a scalable vector for this
2129  // case.
2130  if (isa<ScalableVectorType>(Mask->getType()))
2131  return false;
2132 
2133  SmallVector<int, 16> MaskAsInts;
2134  getShuffleMask(Mask, MaskAsInts);
2135  return isIdentityMask(MaskAsInts);
2136  }
2137 
2138  /// Return true if this shuffle chooses elements from exactly one source
2139  /// vector without lane crossings and does not change the number of elements
2140  /// from its input vectors.
2141  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2142  bool isIdentity() const {
2143  // Not possible to express a shuffle mask for a scalable vector for this
2144  // case.
2145  if (isa<ScalableVectorType>(getType()))
2146  return false;
2147 
2148  return !changesLength() && isIdentityMask(ShuffleMask);
2149  }
2150 
2151  /// Return true if this shuffle lengthens exactly one source vector with
2152  /// undefs in the high elements.
2153  bool isIdentityWithPadding() const;
2154 
2155  /// Return true if this shuffle extracts the first N elements of exactly one
2156  /// source vector.
2157  bool isIdentityWithExtract() const;
2158 
2159  /// Return true if this shuffle concatenates its 2 source vectors. This
2160  /// returns false if either input is undefined. In that case, the shuffle is
2161  /// is better classified as an identity with padding operation.
2162  bool isConcat() const;
2163 
2164  /// Return true if this shuffle mask chooses elements from its source vectors
2165  /// without lane crossings. A shuffle using this mask would be
2166  /// equivalent to a vector select with a constant condition operand.
2167  /// Example: <4,1,6,undef>
2168  /// This returns false if the mask does not choose from both input vectors.
2169  /// In that case, the shuffle is better classified as an identity shuffle.
2170  /// This assumes that vector operands are the same length as the mask
2171  /// (a length-changing shuffle can never be equivalent to a vector select).
2172  static bool isSelectMask(ArrayRef<int> Mask);
2173  static bool isSelectMask(const Constant *Mask) {
2174  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2175  SmallVector<int, 16> MaskAsInts;
2176  getShuffleMask(Mask, MaskAsInts);
2177  return isSelectMask(MaskAsInts);
2178  }
2179 
2180  /// Return true if this shuffle chooses elements from its source vectors
2181  /// without lane crossings and all operands have the same number of elements.
2182  /// In other words, this shuffle is equivalent to a vector select with a
2183  /// constant condition operand.
2184  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2185  /// This returns false if the mask does not choose from both input vectors.
2186  /// In that case, the shuffle is better classified as an identity shuffle.
2187  /// TODO: Optionally allow length-changing shuffles.
2188  bool isSelect() const {
2189  return !changesLength() && isSelectMask(ShuffleMask);
2190  }
2191 
2192  /// Return true if this shuffle mask swaps the order of elements from exactly
2193  /// one source vector.
2194  /// Example: <7,6,undef,4>
2195  /// This assumes that vector operands are the same length as the mask.
2196  static bool isReverseMask(ArrayRef<int> Mask);
2197  static bool isReverseMask(const Constant *Mask) {
2198  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2199  SmallVector<int, 16> MaskAsInts;
2200  getShuffleMask(Mask, MaskAsInts);
2201  return isReverseMask(MaskAsInts);
2202  }
2203 
2204  /// Return true if this shuffle swaps the order of elements from exactly
2205  /// one source vector.
2206  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2207  /// TODO: Optionally allow length-changing shuffles.
2208  bool isReverse() const {
2209  return !changesLength() && isReverseMask(ShuffleMask);
2210  }
2211 
2212  /// Return true if this shuffle mask chooses all elements with the same value
2213  /// as the first element of exactly one source vector.
2214  /// Example: <4,undef,undef,4>
2215  /// This assumes that vector operands are the same length as the mask.
2216  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2217  static bool isZeroEltSplatMask(const Constant *Mask) {
2218  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2219  SmallVector<int, 16> MaskAsInts;
2220  getShuffleMask(Mask, MaskAsInts);
2221  return isZeroEltSplatMask(MaskAsInts);
2222  }
2223 
2224  /// Return true if all elements of this shuffle are the same value as the
2225  /// first element of exactly one source vector without changing the length
2226  /// of that vector.
2227  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2228  /// TODO: Optionally allow length-changing shuffles.
2229  /// TODO: Optionally allow splats from other elements.
2230  bool isZeroEltSplat() const {
2231  return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2232  }
2233 
2234  /// Return true if this shuffle mask is a transpose mask.
2235  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2236  /// even- or odd-numbered vector elements from two n-dimensional source
2237  /// vectors and write each result into consecutive elements of an
2238  /// n-dimensional destination vector. Two shuffles are necessary to complete
2239  /// the transpose, one for the even elements and another for the odd elements.
2240  /// This description closely follows how the TRN1 and TRN2 AArch64
2241  /// instructions operate.
2242  ///
2243  /// For example, a simple 2x2 matrix can be transposed with:
2244  ///
2245  /// ; Original matrix
2246  /// m0 = < a, b >
2247  /// m1 = < c, d >
2248  ///
2249  /// ; Transposed matrix
2250  /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2251  /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2252  ///
2253  /// For matrices having greater than n columns, the resulting nx2 transposed
2254  /// matrix is stored in two result vectors such that one vector contains
2255  /// interleaved elements from all the even-numbered rows and the other vector
2256  /// contains interleaved elements from all the odd-numbered rows. For example,
2257  /// a 2x4 matrix can be transposed with:
2258  ///
2259  /// ; Original matrix
2260  /// m0 = < a, b, c, d >
2261  /// m1 = < e, f, g, h >
2262  ///
2263  /// ; Transposed matrix
2264  /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2265  /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2266  static bool isTransposeMask(ArrayRef<int> Mask);
2267  static bool isTransposeMask(const Constant *Mask) {
2268  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2269  SmallVector<int, 16> MaskAsInts;
2270  getShuffleMask(Mask, MaskAsInts);
2271  return isTransposeMask(MaskAsInts);
2272  }
2273 
2274  /// Return true if this shuffle transposes the elements of its inputs without
2275  /// changing the length of the vectors. This operation may also be known as a
2276  /// merge or interleave. See the description for isTransposeMask() for the
2277  /// exact specification.
2278  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2279  bool isTranspose() const {
2280  return !changesLength() && isTransposeMask(ShuffleMask);
2281  }
2282 
2283  /// Return true if this shuffle mask is an extract subvector mask.
2284  /// A valid extract subvector mask returns a smaller vector from a single
2285  /// source operand. The base extraction index is returned as well.
2286  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2287  int &Index);
2288  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2289  int &Index) {
2290  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2291  // Not possible to express a shuffle mask for a scalable vector for this
2292  // case.
2293  if (isa<ScalableVectorType>(Mask->getType()))
2294  return false;
2295  SmallVector<int, 16> MaskAsInts;
2296  getShuffleMask(Mask, MaskAsInts);
2297  return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2298  }
2299 
2300  /// Return true if this shuffle mask is an extract subvector mask.
2301  bool isExtractSubvectorMask(int &Index) const {
2302  // Not possible to express a shuffle mask for a scalable vector for this
2303  // case.
2304  if (isa<ScalableVectorType>(getType()))
2305  return false;
2306 
2307  int NumSrcElts =
2308  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2309  return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2310  }
2311 
2312  /// Return true if this shuffle mask is an insert subvector mask.
2313  /// A valid insert subvector mask inserts the lowest elements of a second
2314  /// source operand into an in-place first source operand operand.
2315  /// Both the sub vector width and the insertion index is returned.
2316  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2317  int &NumSubElts, int &Index);
2318  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2319  int &NumSubElts, int &Index) {
2320  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2321  // Not possible to express a shuffle mask for a scalable vector for this
2322  // case.
2323  if (isa<ScalableVectorType>(Mask->getType()))
2324  return false;
2325  SmallVector<int, 16> MaskAsInts;
2326  getShuffleMask(Mask, MaskAsInts);
2327  return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2328  }
2329 
2330  /// Return true if this shuffle mask is an insert subvector mask.
2331  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2332  // Not possible to express a shuffle mask for a scalable vector for this
2333  // case.
2334  if (isa<ScalableVectorType>(getType()))
2335  return false;
2336 
2337  int NumSrcElts =
2338  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2339  return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2340  }
2341 
2342  /// Return true if this shuffle mask replicates each of the \p VF elements
2343  /// in a vector \p ReplicationFactor times.
2344  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2345  /// <0,0,0,1,1,1,2,2,2,3,3,3>
2346  static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2347  int &VF);
2348  static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2349  int &VF) {
2350  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2351  // Not possible to express a shuffle mask for a scalable vector for this
2352  // case.
2353  if (isa<ScalableVectorType>(Mask->getType()))
2354  return false;
2355  SmallVector<int, 16> MaskAsInts;
2356  getShuffleMask(Mask, MaskAsInts);
2357  return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2358  }
2359 
2360  /// Return true if this shuffle mask is a replication mask.
2361  bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2362 
2363  /// Change values in a shuffle permute mask assuming the two vector operands
2364  /// of length InVecNumElts have swapped position.
2366  unsigned InVecNumElts) {
2367  for (int &Idx : Mask) {
2368  if (Idx == -1)
2369  continue;
2370  Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2371  assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2372  "shufflevector mask index out of range");
2373  }
2374  }
2375 
2376  // Methods for support type inquiry through isa, cast, and dyn_cast:
2377  static bool classof(const Instruction *I) {
2378  return I->getOpcode() == Instruction::ShuffleVector;
2379  }
2380  static bool classof(const Value *V) {
2381  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2382  }
2383 };
2384 
2385 template <>
2387  : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2388 
2390 
2391 //===----------------------------------------------------------------------===//
2392 // ExtractValueInst Class
2393 //===----------------------------------------------------------------------===//
2394 
2395 /// This instruction extracts a struct member or array
2396 /// element value from an aggregate value.
2397 ///
2399  SmallVector<unsigned, 4> Indices;
2400 
2401  ExtractValueInst(const ExtractValueInst &EVI);
2402 
2403  /// Constructors - Create a extractvalue instruction with a base aggregate
2404  /// value and a list of indices. The first ctor can optionally insert before
2405  /// an existing instruction, the second appends the new instruction to the
2406  /// specified BasicBlock.
2407  inline ExtractValueInst(Value *Agg,
2408  ArrayRef<unsigned> Idxs,
2409  const Twine &NameStr,
2410  Instruction *InsertBefore);
2411  inline ExtractValueInst(Value *Agg,
2412  ArrayRef<unsigned> Idxs,
2413  const Twine &NameStr, BasicBlock *InsertAtEnd);
2414 
2415  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2416 
2417 protected:
2418  // Note: Instruction needs to be a friend here to call cloneImpl.
2419  friend class Instruction;
2420 
2421  ExtractValueInst *cloneImpl() const;
2422 
2423 public:
2425  ArrayRef<unsigned> Idxs,
2426  const Twine &NameStr = "",
2427  Instruction *InsertBefore = nullptr) {
2428  return new
2429  ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2430  }
2431 
2433  ArrayRef<unsigned> Idxs,
2434  const Twine &NameStr,
2435  BasicBlock *InsertAtEnd) {
2436  return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2437  }
2438 
2439  /// Returns the type of the element that would be extracted
2440  /// with an extractvalue instruction with the specified parameters.
2441  ///
2442  /// Null is returned if the indices are invalid for the specified type.
2443  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2444 
2445  using idx_iterator = const unsigned*;
2446 
2447  inline idx_iterator idx_begin() const { return Indices.begin(); }
2448  inline idx_iterator idx_end() const { return Indices.end(); }
2450  return make_range(idx_begin(), idx_end());
2451  }
2452 
2454  return getOperand(0);
2455  }
2456  const Value *getAggregateOperand() const {
2457  return getOperand(0);
2458  }
2459  static unsigned getAggregateOperandIndex() {
2460  return 0U; // get index for modifying correct operand
2461  }
2462 
2464  return Indices;
2465  }
2466 
2467  unsigned getNumIndices() const {
2468  return (unsigned)Indices.size();
2469  }
2470 
2471  bool hasIndices() const {
2472  return true;
2473  }
2474 
2475  // Methods for support type inquiry through isa, cast, and dyn_cast:
2476  static bool classof(const Instruction *I) {
2477  return I->getOpcode() == Instruction::ExtractValue;
2478  }
2479  static bool classof(const Value *V) {
2480  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2481  }
2482 };
2483 
2484 ExtractValueInst::ExtractValueInst(Value *Agg,
2485  ArrayRef<unsigned> Idxs,
2486  const Twine &NameStr,
2487  Instruction *InsertBefore)
2488  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2489  ExtractValue, Agg, InsertBefore) {
2490  init(Idxs, NameStr);
2491 }
2492 
2493 ExtractValueInst::ExtractValueInst(Value *Agg,
2494  ArrayRef<unsigned> Idxs,
2495  const Twine &NameStr,
2496  BasicBlock *InsertAtEnd)
2497  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2498  ExtractValue, Agg, InsertAtEnd) {
2499  init(Idxs, NameStr);
2500 }
2501 
2502 //===----------------------------------------------------------------------===//
2503 // InsertValueInst Class
2504 //===----------------------------------------------------------------------===//
2505 
2506 /// This instruction inserts a struct field of array element
2507 /// value into an aggregate value.
2508 ///
2510  SmallVector<unsigned, 4> Indices;
2511 
2512  InsertValueInst(const InsertValueInst &IVI);
2513 
2514  /// Constructors - Create a insertvalue instruction with a base aggregate
2515  /// value, a value to insert, and a list of indices. The first ctor can
2516  /// optionally insert before an existing instruction, the second appends
2517  /// the new instruction to the specified BasicBlock.
2518  inline InsertValueInst(Value *Agg, Value *Val,
2519  ArrayRef<unsigned> Idxs,
2520  const Twine &NameStr,
2521  Instruction *InsertBefore);
2522  inline InsertValueInst(Value *Agg, Value *Val,
2523  ArrayRef<unsigned> Idxs,
2524  const Twine &NameStr, BasicBlock *InsertAtEnd);
2525 
2526  /// Constructors - These two constructors are convenience methods because one
2527  /// and two index insertvalue instructions are so common.
2528  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2529  const Twine &NameStr = "",
2530  Instruction *InsertBefore = nullptr);
2531  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2532  BasicBlock *InsertAtEnd);
2533 
2534  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2535  const Twine &NameStr);
2536 
2537 protected:
2538  // Note: Instruction needs to be a friend here to call cloneImpl.
2539  friend class Instruction;
2540 
2541  InsertValueInst *cloneImpl() const;
2542 
2543 public:
2544  // allocate space for exactly two operands
2545  void *operator new(size_t S) { return User::operator new(S, 2); }
2546  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2547 
2548  static InsertValueInst *Create(Value *Agg, Value *Val,
2549  ArrayRef<unsigned> Idxs,
2550  const Twine &NameStr = "",
2551  Instruction *InsertBefore = nullptr) {
2552  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2553  }
2554 
2555  static InsertValueInst *Create(Value *Agg, Value *Val,
2556  ArrayRef<unsigned> Idxs,
2557  const Twine &NameStr,
2558  BasicBlock *InsertAtEnd) {
2559  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2560  }
2561 
2562  /// Transparently provide more efficient getOperand methods.
2564 
2565  using idx_iterator = const unsigned*;
2566 
2567  inline idx_iterator idx_begin() const { return Indices.begin(); }
2568  inline idx_iterator idx_end() const { return Indices.end(); }
2570  return make_range(idx_begin(), idx_end());
2571  }
2572 
2574  return getOperand(0);
2575  }
2576  const Value *getAggregateOperand() const {
2577  return getOperand(0);
2578  }
2579  static unsigned getAggregateOperandIndex() {
2580  return 0U; // get index for modifying correct operand
2581  }
2582 
2584  return getOperand(1);
2585  }
2587  return getOperand(1);
2588  }
2589  static unsigned getInsertedValueOperandIndex() {
2590  return 1U; // get index for modifying correct operand
2591  }
2592 
2594  return Indices;
2595  }
2596 
2597  unsigned getNumIndices() const {
2598  return (unsigned)Indices.size();
2599  }
2600 
2601  bool hasIndices() const {
2602  return true;
2603  }
2604 
2605  // Methods for support type inquiry through isa, cast, and dyn_cast:
2606  static bool classof(const Instruction *I) {
2607  return I->getOpcode() == Instruction::InsertValue;
2608  }
2609  static bool classof(const Value *V) {
2610  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2611  }
2612 };
2613 
2614 template <>
2616  public FixedNumOperandTraits<InsertValueInst, 2> {
2617 };
2618 
2619 InsertValueInst::InsertValueInst(Value *Agg,
2620  Value *Val,
2621  ArrayRef<unsigned> Idxs,
2622  const Twine &NameStr,
2623  Instruction *InsertBefore)
2624  : Instruction(Agg->getType(), InsertValue,
2625  OperandTraits<InsertValueInst>::op_begin(this),
2626  2, InsertBefore) {
2627  init(Agg, Val, Idxs, NameStr);
2628 }
2629 
2630 InsertValueInst::InsertValueInst(Value *Agg,
2631  Value *Val,
2632  ArrayRef<unsigned> Idxs,
2633  const Twine &NameStr,
2634  BasicBlock *InsertAtEnd)
2635  : Instruction(Agg->getType(), InsertValue,
2636  OperandTraits<InsertValueInst>::op_begin(this),
2637  2, InsertAtEnd) {
2638  init(Agg, Val, Idxs, NameStr);
2639 }
2640 
2642 
2643 //===----------------------------------------------------------------------===//
2644 // PHINode Class
2645 //===----------------------------------------------------------------------===//
2646 
2647 // PHINode - The PHINode class is used to represent the magical mystical PHI
2648 // node, that can not exist in nature, but can be synthesized in a computer
2649 // scientist's overactive imagination.
2650 //
2651 class PHINode : public Instruction {
2652  /// The number of operands actually allocated. NumOperands is
2653  /// the number actually in use.
2654  unsigned ReservedSpace;
2655 
2656  PHINode(const PHINode &PN);
2657 
2658  explicit PHINode(Type *Ty, unsigned NumReservedValues,
2659  const Twine &NameStr = "",
2660  Instruction *InsertBefore = nullptr)
2661  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2662  ReservedSpace(NumReservedValues) {
2663  assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2664  setName(NameStr);
2665  allocHungoffUses(ReservedSpace);
2666  }
2667 
2668  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2669  BasicBlock *InsertAtEnd)
2670  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2671  ReservedSpace(NumReservedValues) {
2672  assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2673  setName(NameStr);
2674  allocHungoffUses(ReservedSpace);
2675  }
2676 
2677 protected:
2678  // Note: Instruction needs to be a friend here to call cloneImpl.
2679  friend class Instruction;
2680 
2681  PHINode *cloneImpl() const;
2682 
2683  // allocHungoffUses - this is more complicated than the generic
2684  // User::allocHungoffUses, because we have to allocate Uses for the incoming
2685  // values and pointers to the incoming blocks, all in one allocation.
2686  void allocHungoffUses(unsigned N) {
2687  User::allocHungoffUses(N, /* IsPhi */ true);
2688  }
2689 
2690 public:
2691  /// Constructors - NumReservedValues is a hint for the number of incoming
2692  /// edges that this phi node will have (use 0 if you really have no idea).
2693  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2694  const Twine &NameStr = "",
2695  Instruction *InsertBefore = nullptr) {
2696  return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2697  }
2698 
2699  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2700  const Twine &NameStr, BasicBlock *InsertAtEnd) {
2701  return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2702  }
2703 
2704  /// Provide fast operand accessors
2706 
2707  // Block iterator interface. This provides access to the list of incoming
2708  // basic blocks, which parallels the list of incoming values.
2709 
2712 
2714  return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2715  }
2716 
2718  return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2719  }
2720 
2722  return block_begin() + getNumOperands();
2723  }
2724 
2726  return block_begin() + getNumOperands();
2727  }
2728 
2730  return make_range(block_begin(), block_end());
2731  }
2732 
2734  return make_range(block_begin(), block_end());
2735  }
2736 
2737  op_range incoming_values() { return operands(); }
2738 
2739  const_op_range incoming_values() const { return operands(); }
2740 
2741  /// Return the number of incoming edges
2742  ///
2743  unsigned getNumIncomingValues() const { return getNumOperands(); }
2744 
2745  /// Return incoming value number x
2746  ///
2747  Value *getIncomingValue(unsigned i) const {
2748  return getOperand(i);
2749  }
2750  void setIncomingValue(unsigned i, Value *V) {
2751  assert(V && "PHI node got a null value!");
2752  assert(getType() == V->getType() &&
2753  "All operands to PHI node must be the same type as the PHI node!");
2754  setOperand(i, V);
2755  }
2756 
2757  static unsigned getOperandNumForIncomingValue(unsigned i) {
2758  return i;
2759  }
2760 
2761  static unsigned getIncomingValueNumForOperand(unsigned i) {
2762  return i;
2763  }
2764 
2765  /// Return incoming basic block number @p i.
2766  ///
2767  BasicBlock *getIncomingBlock(unsigned i) const {
2768  return block_begin()[i];
2769  }
2770 
2771  /// Return incoming basic block corresponding
2772  /// to an operand of the PHI.
2773  ///
2774  BasicBlock *getIncomingBlock(const Use &U) const {
2775  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2776  return getIncomingBlock(unsigned(&U - op_begin()));
2777  }
2778 
2779  /// Return incoming basic block corresponding
2780  /// to value use iterator.
2781  ///
2783  return getIncomingBlock(I.getUse());
2784  }
2785 
2786  void setIncomingBlock(unsigned i, BasicBlock *BB) {
2787  assert(BB && "PHI node got a null basic block!");
2788  block_begin()[i] = BB;
2789  }
2790 
2791  /// Replace every incoming basic block \p Old to basic block \p New.
2793  assert(New && Old && "PHI node got a null basic block!");
2794  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2795  if (getIncomingBlock(Op) == Old)
2796  setIncomingBlock(Op, New);
2797  }
2798 
2799  /// Add an incoming value to the end of the PHI list
2800  ///
2802  if (getNumOperands() == ReservedSpace)
2803  growOperands(); // Get more space!
2804  // Initialize some new operands.
2805  setNumHungOffUseOperands(getNumOperands() + 1);
2806  setIncomingValue(getNumOperands() - 1, V);
2807  setIncomingBlock(getNumOperands() - 1, BB);
2808  }
2809 
2810  /// Remove an incoming value. This is useful if a
2811  /// predecessor basic block is deleted. The value removed is returned.
2812  ///
2813  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2814  /// is true), the PHI node is destroyed and any uses of it are replaced with
2815  /// dummy values. The only time there should be zero incoming values to a PHI
2816  /// node is when the block is dead, so this strategy is sound.
2817  ///
2818  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2819 
2820  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2821  int Idx = getBasicBlockIndex(BB);
2822  assert(Idx >= 0 && "Invalid basic block argument to remove!");
2823  return removeIncomingValue(Idx, DeletePHIIfEmpty);
2824  }
2825 
2826  /// Return the first index of the specified basic
2827  /// block in the value list for this PHI. Returns -1 if no instance.
2828  ///
2829  int getBasicBlockIndex(const BasicBlock *BB) const {
2830  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2831  if (block_begin()[i] == BB)
2832  return i;
2833  return -1;
2834  }
2835 
2837  int Idx = getBasicBlockIndex(BB);
2838  assert(Idx >= 0 && "Invalid basic block argument!");
2839  return getIncomingValue(Idx);
2840  }
2841 
2842  /// Set every incoming value(s) for block \p BB to \p V.
2844  assert(BB && "PHI node got a null basic block!");
2845  bool Found = false;
2846  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2847  if (getIncomingBlock(Op) == BB) {
2848  Found = true;
2849  setIncomingValue(Op, V);
2850  }
2851  (void)Found;
2852  assert(Found && "Invalid basic block argument to set!");
2853  }
2854 
2855  /// If the specified PHI node always merges together the
2856  /// same value, return the value, otherwise return null.
2857  Value *hasConstantValue() const;
2858 
2859  /// Whether the specified PHI node always merges
2860  /// together the same value, assuming undefs are equal to a unique
2861  /// non-undef value.
2862  bool hasConstantOrUndefValue() const;
2863 
2864  /// If the PHI node is complete which means all of its parent's predecessors
2865  /// have incoming value in this PHI, return true, otherwise return false.
2866  bool isComplete() const {
2868  [this](const BasicBlock *Pred) {
2869  return getBasicBlockIndex(Pred) >= 0;
2870  });
2871  }
2872 
2873  /// Methods for support type inquiry through isa, cast, and dyn_cast:
2874  static bool classof(const Instruction *I) {
2875  return I->getOpcode() == Instruction::PHI;
2876  }
2877  static bool classof(const Value *V) {
2878  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2879  }
2880 
2881 private:
2882  void growOperands();
2883 };
2884 
2885 template <>
2887 };
2888 
2890 
2891 //===----------------------------------------------------------------------===//
2892 // LandingPadInst Class
2893 //===----------------------------------------------------------------------===//
2894 
2895 //===---------------------------------------------------------------------------
2896 /// The landingpad instruction holds all of the information
2897 /// necessary to generate correct exception handling. The landingpad instruction
2898 /// cannot be moved from the top of a landing pad block, which itself is
2899 /// accessible only from the 'unwind' edge of an invoke. This uses the
2900 /// SubclassData field in Value to store whether or not the landingpad is a
2901 /// cleanup.
2902 ///
2903 class LandingPadInst : public Instruction {
2904  using CleanupField = BoolBitfieldElementT<0>;
2905 
2906  /// The number of operands actually allocated. NumOperands is
2907  /// the number actually in use.
2908  unsigned ReservedSpace;
2909 
2910  LandingPadInst(const LandingPadInst &LP);
2911 
2912 public:
2914 
2915 private:
2916  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2917  const Twine &NameStr, Instruction *InsertBefore);
2918  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2919  const Twine &NameStr, BasicBlock *InsertAtEnd);
2920 
2921  // Allocate space for exactly zero operands.
2922  void *operator new(size_t S) { return User::operator new(S); }
2923 
2924  void growOperands(unsigned Size);
2925  void init(unsigned NumReservedValues, const Twine &NameStr);
2926 
2927 protected:
2928  // Note: Instruction needs to be a friend here to call cloneImpl.
2929  friend class Instruction;
2930 
2931  LandingPadInst *cloneImpl() const;
2932 
2933 public:
2934  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2935 
2936  /// Constructors - NumReservedClauses is a hint for the number of incoming
2937  /// clauses that this landingpad will have (use 0 if you really have no idea).
2938  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2939  const Twine &NameStr = "",
2940  Instruction *InsertBefore = nullptr);
2941  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2942  const Twine &NameStr, BasicBlock *InsertAtEnd);
2943 
2944  /// Provide fast operand accessors
2946 
2947  /// Return 'true' if this landingpad instruction is a
2948  /// cleanup. I.e., it should be run when unwinding even if its landing pad
2949  /// doesn't catch the exception.
2950  bool isCleanup() const { return getSubclassData<CleanupField>(); }
2951 
2952  /// Indicate that this landingpad instruction is a cleanup.
2953  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2954 
2955  /// Add a catch or filter clause to the landing pad.
2956  void addClause(Constant *ClauseVal);
2957 
2958  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2959  /// determine what type of clause this is.
2960  Constant *getClause(unsigned Idx) const {
2961  return cast<Constant>(getOperandList()[Idx]);
2962  }
2963 
2964  /// Return 'true' if the clause and index Idx is a catch clause.
2965  bool isCatch(unsigned Idx) const {
2966  return !isa<ArrayType>(getOperandList()[Idx]->getType());
2967  }
2968 
2969  /// Return 'true' if the clause and index Idx is a filter clause.
2970  bool isFilter(unsigned Idx) const {
2971  return isa<ArrayType>(getOperandList()[Idx]->getType());
2972  }
2973 
2974  /// Get the number of clauses for this landing pad.
2975  unsigned getNumClauses() const { return getNumOperands(); }
2976 
2977  /// Grow the size of the operand list to accommodate the new
2978  /// number of clauses.
2979  void reserveClauses(unsigned Size) { growOperands(Size); }
2980 
2981  // Methods for support type inquiry through isa, cast, and dyn_cast:
2982  static bool classof(const Instruction *I) {
2983  return I->getOpcode() == Instruction::LandingPad;
2984  }
2985  static bool classof(const Value *V) {
2986  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2987  }
2988 };
2989 
2990 template <>
2992 };
2993 
2995 
2996 //===----------------------------------------------------------------------===//
2997 // ReturnInst Class
2998 //===----------------------------------------------------------------------===//
2999 
3000 //===---------------------------------------------------------------------------
3001 /// Return a value (possibly void), from a function. Execution
3002 /// does not continue in this function any longer.
3003 ///
3004 class ReturnInst : public Instruction {
3005  ReturnInst(const ReturnInst &RI);
3006 
3007 private:
3008  // ReturnInst constructors:
3009  // ReturnInst() - 'ret void' instruction
3010  // ReturnInst( null) - 'ret void' instruction
3011  // ReturnInst(Value* X) - 'ret X' instruction
3012  // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3013  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3014  // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3015  // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3016  //
3017  // NOTE: If the Value* passed is of type void then the constructor behaves as
3018  // if it was passed NULL.
3019  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3020  Instruction *InsertBefore = nullptr);
3021  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3022  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3023 
3024 protected:
3025  // Note: Instruction needs to be a friend here to call cloneImpl.
3026  friend class Instruction;
3027 
3028  ReturnInst *cloneImpl() const;
3029 
3030 public:
3031  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3032  Instruction *InsertBefore = nullptr) {
3033  return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3034  }
3035 
3036  static ReturnInst* Create(LLVMContext &C, Value *retVal,
3037  BasicBlock *InsertAtEnd) {
3038  return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3039  }
3040 
3041  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3042  return new(0) ReturnInst(C, InsertAtEnd);
3043  }
3044 
3045  /// Provide fast operand accessors
3047 
3048  /// Convenience accessor. Returns null if there is no return value.
3050  return getNumOperands() != 0 ? getOperand(0) : nullptr;
3051  }
3052 
3053  unsigned getNumSuccessors() const { return 0; }
3054 
3055  // Methods for support type inquiry through isa, cast, and dyn_cast:
3056  static bool classof(const Instruction *I) {
3057  return (I->getOpcode() == Instruction::Ret);
3058  }
3059  static bool classof(const Value *V) {
3060  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3061  }
3062 
3063 private:
3064  BasicBlock *getSuccessor(unsigned idx) const {
3065  llvm_unreachable("ReturnInst has no successors!");
3066  }
3067 
3068  void setSuccessor(unsigned idx, BasicBlock *B) {
3069  llvm_unreachable("ReturnInst has no successors!");
3070  }
3071 };
3072 
3073 template <>
3074 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3075 };
3076 
3078 
3079 //===----------------------------------------------------------------------===//
3080 // BranchInst Class
3081 //===----------------------------------------------------------------------===//
3082 
3083 //===---------------------------------------------------------------------------
3084 /// Conditional or Unconditional Branch instruction.
3085 ///
3086 class BranchInst : public Instruction {
3087  /// Ops list - Branches are strange. The operands are ordered:
3088  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3089  /// they don't have to check for cond/uncond branchness. These are mostly
3090  /// accessed relative from op_end().
3091  BranchInst(const BranchInst &BI);
3092  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3093  // BranchInst(BB *B) - 'br B'
3094  // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3095  // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3096  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3097  // BranchInst(BB* B, BB *I) - 'br B' insert at end
3098  // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3099  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3100  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3101  Instruction *InsertBefore = nullptr);
3102  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3103  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3104  BasicBlock *InsertAtEnd);
3105 
3106  void AssertOK();
3107 
3108 protected:
3109  // Note: Instruction needs to be a friend here to call cloneImpl.
3110  friend class Instruction;
3111 
3112  BranchInst *cloneImpl() const;
3113 
3114 public:
3115  /// Iterator type that casts an operand to a basic block.
3116  ///
3117  /// This only makes sense because the successors are stored as adjacent
3118  /// operands for branch instructions.
3120  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3121  std::random_access_iterator_tag, BasicBlock *,
3122  ptrdiff_t, BasicBlock *, BasicBlock *> {
3123  explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3124 
3125  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3126  BasicBlock *operator->() const { return operator*(); }
3127  };
3128 
3129  /// The const version of `succ_op_iterator`.
3131  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3132  std::random_access_iterator_tag,
3133  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3134  const BasicBlock *> {
3136  : iterator_adaptor_base(I) {}
3137 
3138  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3139  const BasicBlock *operator->() const { return operator*(); }
3140  };
3141 
3142  static BranchInst *Create(BasicBlock *IfTrue,
3143  Instruction *InsertBefore = nullptr) {
3144  return new(1) BranchInst(IfTrue, InsertBefore);
3145  }
3146 
3147  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3148  Value *Cond, Instruction *InsertBefore = nullptr) {
3149  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3150  }
3151 
3152  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3153  return new(1) BranchInst(IfTrue, InsertAtEnd);
3154  }
3155 
3156  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3157  Value *Cond, BasicBlock *InsertAtEnd) {
3158  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3159  }
3160 
3161  /// Transparently provide more efficient getOperand methods.
3163 
3164  bool isUnconditional() const { return getNumOperands() == 1; }
3165  bool isConditional() const { return getNumOperands() == 3; }
3166 
3167  Value *getCondition() const {
3168  assert(isConditional() && "Cannot get condition of an uncond branch!");
3169  return Op<-3>();
3170  }
3171 
3172  void setCondition(Value *V) {
3173  assert(isConditional() && "Cannot set condition of unconditional branch!");
3174  Op<-3>() = V;
3175  }
3176 
3177  unsigned getNumSuccessors() const { return 1+isConditional(); }
3178 
3179  BasicBlock *getSuccessor(unsigned i) const {
3180  assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3181  return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3182  }
3183 
3184  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3185  assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3186  *(&Op<-1>() - idx) = NewSucc;
3187  }
3188 
3189  /// Swap the successors of this branch instruction.
3190  ///
3191  /// Swaps the successors of the branch instruction. This also swaps any
3192  /// branch weight metadata associated with the instruction so that it
3193  /// continues to map correctly to each operand.
3194  void swapSuccessors();
3195 
3197  return make_range(
3198  succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3199  succ_op_iterator(value_op_end()));
3200  }
3201 
3204  std::next(value_op_begin(), isConditional() ? 1 : 0)),
3205  const_succ_op_iterator(value_op_end()));
3206  }
3207 
3208  // Methods for support type inquiry through isa, cast, and dyn_cast:
3209  static bool classof(const Instruction *I) {
3210  return (I->getOpcode() == Instruction::Br);
3211  }
3212  static bool classof(const Value *V) {
3213  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3214  }
3215 };
3216 
3217 template <>
3218 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3219 };
3220 
3222 
3223 //===----------------------------------------------------------------------===//
3224 // SwitchInst Class
3225 //===----------------------------------------------------------------------===//
3226 
3227 //===---------------------------------------------------------------------------
3228 /// Multiway switch
3229 ///
3230 class SwitchInst : public Instruction {
3231  unsigned ReservedSpace;
3232 
3233  // Operand[0] = Value to switch on
3234  // Operand[1] = Default basic block destination
3235  // Operand[2n ] = Value to match
3236  // Operand[2n+1] = BasicBlock to go to on match
3237  SwitchInst(const SwitchInst &SI);
3238 
3239  /// Create a new switch instruction, specifying a value to switch on and a
3240  /// default destination. The number of additional cases can be specified here
3241  /// to make memory allocation more efficient. This constructor can also
3242  /// auto-insert before another instruction.
3243  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3244  Instruction *InsertBefore);
3245 
3246  /// Create a new switch instruction, specifying a value to switch on and a
3247  /// default destination. The number of additional cases can be specified here
3248  /// to make memory allocation more efficient. This constructor also
3249  /// auto-inserts at the end of the specified BasicBlock.
3250  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3251  BasicBlock *InsertAtEnd);
3252 
3253  // allocate space for exactly zero operands
3254  void *operator new(size_t S) { return User::operator new(S); }
3255 
3256  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3257  void growOperands();
3258 
3259 protected:
3260  // Note: Instruction needs to be a friend here to call cloneImpl.
3261  friend class Instruction;
3262 
3263  SwitchInst *cloneImpl() const;
3264 
3265 public:
3266  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3267 
3268  // -2
3269  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3270 
3271  template <typename CaseHandleT> class CaseIteratorImpl;
3272 
3273  /// A handle to a particular switch case. It exposes a convenient interface
3274  /// to both the case value and the successor block.
3275  ///
3276  /// We define this as a template and instantiate it to form both a const and
3277  /// non-const handle.
3278  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3280  // Directly befriend both const and non-const iterators.
3281  friend class SwitchInst::CaseIteratorImpl<
3282  CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3283 
3284  protected:
3285  // Expose the switch type we're parameterized with to the iterator.
3286  using SwitchInstType = SwitchInstT;
3287 
3288  SwitchInstT *SI;
3290 
3291  CaseHandleImpl() = default;
3292  CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3293 
3294  public:
3295  /// Resolves case value for current case.
3296  ConstantIntT *getCaseValue() const {
3297  assert((unsigned)Index < SI->getNumCases() &&
3298  "Index out the number of cases.");
3299  return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3300  }
3301 
3302  /// Resolves successor for current case.
3303  BasicBlockT *getCaseSuccessor() const {
3304  assert(((unsigned)Index < SI->getNumCases() ||
3305  (unsigned)Index == DefaultPseudoIndex) &&
3306  "Index out the number of cases.");
3307  return SI->getSuccessor(getSuccessorIndex());
3308  }
3309 
3310  /// Returns number of current case.
3311  unsigned getCaseIndex() const { return Index; }
3312 
3313  /// Returns successor index for current case successor.
3314  unsigned getSuccessorIndex() const {
3315  assert(((unsigned)Index == DefaultPseudoIndex ||
3316  (unsigned)Index < SI->getNumCases()) &&
3317  "Index out the number of cases.");
3318  return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3319  }
3320 
3321  bool operator==(const CaseHandleImpl &RHS) const {
3322  assert(SI == RHS.SI && "Incompatible operators.");
3323  return Index == RHS.Index;
3324  }
3325  };
3326 
3327  using ConstCaseHandle =
3329 
3331  : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3333 
3334  public:
3336 
3337  /// Sets the new value for current case.
3338  void setValue(ConstantInt *V) const {
3339  assert((unsigned)Index < SI->getNumCases() &&
3340  "Index out the number of cases.");
3341  SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3342  }
3343 
3344  /// Sets the new successor for current case.
3345  void setSuccessor(BasicBlock *S) const {
3346  SI->setSuccessor(getSuccessorIndex(), S);
3347  }
3348  };
3349 
3350  template <typename CaseHandleT>
3351  class CaseIteratorImpl
3352  : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3353  std::random_access_iterator_tag,
3354  const CaseHandleT> {
3355  using SwitchInstT = typename CaseHandleT::SwitchInstType;
3356 
3357  CaseHandleT Case;
3358 
3359  public:
3360  /// Default constructed iterator is in an invalid state until assigned to
3361  /// a case for a particular switch.
3362  CaseIteratorImpl() = default;
3363 
3364  /// Initializes case iterator for given SwitchInst and for given
3365  /// case number.
3366  CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3367 
3368  /// Initializes case iterator for given SwitchInst and for given
3369  /// successor index.
3371  unsigned SuccessorIndex) {
3372  assert(SuccessorIndex < SI->getNumSuccessors() &&
3373  "Successor index # out of range!");
3374  return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3375  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3376  }
3377 
3378  /// Support converting to the const variant. This will be a no-op for const
3379  /// variant.
3381  return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3382  }
3383 
3385  // Check index correctness after addition.
3386  // Note: Index == getNumCases() means end().
3387  assert(Case.Index + N >= 0 &&
3388  (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3389  "Case.Index out the number of cases.");
3390  Case.Index += N;
3391  return *this;
3392  }
3394  // Check index correctness after subtraction.
3395  // Note: Case.Index == getNumCases() means end().
3396  assert(Case.Index - N >= 0 &&
3397  (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3398  "Case.Index out the number of cases.");
3399  Case.Index -= N;
3400  return *this;
3401  }
3403  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3404  return Case.Index - RHS.Case.Index;
3405  }
3406  bool operator==(const CaseIteratorImpl &RHS) const {
3407  return Case == RHS.Case;
3408  }
3409  bool operator<(const CaseIteratorImpl &RHS) const {
3410  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3411  return Case.Index < RHS.Case.Index;
3412  }
3413  const CaseHandleT &operator*() const { return Case; }
3414  };
3415 
3418 
3420  unsigned NumCases,
3421  Instruction *InsertBefore = nullptr) {
3422  return new SwitchInst(Value, Default, NumCases, InsertBefore);
3423  }
3424 
3426  unsigned NumCases, BasicBlock *InsertAtEnd) {
3427  return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3428  }
3429 
3430  /// Provide fast operand accessors
3432 
3433  // Accessor Methods for Switch stmt
3434  Value *getCondition() const { return getOperand(0); }
3435  void setCondition(Value *V) { setOperand(0, V); }
3436 
3438  return cast<BasicBlock>(getOperand(1));
3439  }
3440 
3441  void setDefaultDest(BasicBlock *DefaultCase) {
3442  setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3443  }
3444 
3445  /// Return the number of 'cases' in this switch instruction, excluding the
3446  /// default case.
3447  unsigned getNumCases() const {
3448  return getNumOperands()/2 - 1;
3449  }
3450 
3451  /// Returns a read/write iterator that points to the first case in the
3452  /// SwitchInst.
3454  return CaseIt(this, 0);
3455  }
3456 
3457  /// Returns a read-only iterator that points to the first case in the
3458  /// SwitchInst.
3460  return ConstCaseIt(this, 0);
3461  }
3462 
3463  /// Returns a read/write iterator that points one past the last in the
3464  /// SwitchInst.
3466  return CaseIt(this, getNumCases());
3467  }
3468 
3469  /// Returns a read-only iterator that points one past the last in the
3470  /// SwitchInst.
3472  return ConstCaseIt(this, getNumCases());
3473  }
3474 
3475  /// Iteration adapter for range-for loops.
3477  return make_range(case_begin(), case_end());
3478  }
3479 
3480  /// Constant iteration adapter for range-for loops.
3482  return make_range(case_begin(), case_end());
3483  }
3484 
3485  /// Returns an iterator that points to the default case.
3486  /// Note: this iterator allows to resolve successor only. Attempt
3487  /// to resolve case value causes an assertion.
3488  /// Also note, that increment and decrement also causes an assertion and
3489  /// makes iterator invalid.
3491  return CaseIt(this, DefaultPseudoIndex);
3492  }
3494  return ConstCaseIt(this, DefaultPseudoIndex);
3495  }
3496 
3497  /// Search all of the case values for the specified constant. If it is
3498  /// explicitly handled, return the case iterator of it, otherwise return
3499  /// default case iterator to indicate that it is handled by the default
3500  /// handler.
3502  return CaseIt(
3503  this,
3504  const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3505  }
3507  ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3508  return Case.getCaseValue() == C;
3509  });
3510  if (I != case_end())
3511  return I;
3512 
3513  return case_default();
3514  }
3515 
3516  /// Finds the unique case value for a given successor. Returns null if the
3517  /// successor is not found, not unique, or is the default case.
3519  if (BB == getDefaultDest())
3520  return nullptr;
3521 
3522  ConstantInt *CI = nullptr;
3523  for (auto Case : cases()) {
3524  if (Case.getCaseSuccessor() != BB)
3525  continue;
3526 
3527  if (CI)
3528  return nullptr; // Multiple cases lead to BB.
3529 
3530  CI = Case.getCaseValue();
3531  }
3532 
3533  return CI;
3534  }
3535 
3536  /// Add an entry to the switch instruction.
3537  /// Note:
3538  /// This action invalidates case_end(). Old case_end() iterator will
3539  /// point to the added case.
3540  void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3541 
3542  /// This method removes the specified case and its successor from the switch
3543  /// instruction. Note that this operation may reorder the remaining cases at
3544  /// index idx and above.
3545  /// Note:
3546  /// This action invalidates iterators for all cases following the one removed,
3547  /// including the case_end() iterator. It returns an iterator for the next
3548  /// case.
3549  CaseIt removeCase(CaseIt I);
3550 
3551  unsigned getNumSuccessors() const { return getNumOperands()/2; }
3552  BasicBlock *getSuccessor(unsigned idx) const {
3553  assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3554  return cast<BasicBlock>(getOperand(idx*2+1));
3555  }
3556  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3557  assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3558  setOperand(idx * 2 + 1, NewSucc);
3559  }
3560 
3561  // Methods for support type inquiry through isa, cast, and dyn_cast:
3562  static bool classof(const Instruction *I) {
3563  return I->getOpcode() == Instruction::Switch;
3564  }
3565  static bool classof(const Value *V) {
3566  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3567  }
3568 };
3569 
3570 /// A wrapper class to simplify modification of SwitchInst cases along with
3571 /// their prof branch_weights metadata.
3573  SwitchInst &SI;
3575  bool Changed = false;
3576 
3577 protected:
3578  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3579 
3581 
3582  void init();
3583 
3584 public:
3586  SwitchInst *operator->() { return &SI; }
3587  SwitchInst &operator*() { return SI; }
3588  operator SwitchInst *() { return &SI; }
3589 
3591 
3593  if (Changed)
3594  SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3595  }
3596 
3597  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3598  /// correspondent branch weight.
3600 
3601  /// Delegate the call to the underlying SwitchInst::addCase() and set the
3602  /// specified branch weight for the added case.
3603  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3604 
3605  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3606  /// this object to not touch the underlying SwitchInst in destructor.
3608 
3609  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3610  CaseWeightOpt getSuccessorWeight(unsigned idx);
3611 
3612  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3613 };
3614 
3615 template <>
3617 };
3618 
3620 
3621 //===----------------------------------------------------------------------===//
3622 // IndirectBrInst Class
3623 //===----------------------------------------------------------------------===//
3624 
3625 //===---------------------------------------------------------------------------
3626 /// Indirect Branch Instruction.
3627 ///
3628 class IndirectBrInst : public Instruction {
3629  unsigned ReservedSpace;
3630 
3631  // Operand[0] = Address to jump to
3632  // Operand[n+1] = n-th destination
3633  IndirectBrInst(const IndirectBrInst &IBI);
3634 
3635  /// Create a new indirectbr instruction, specifying an
3636  /// Address to jump to. The number of expected destinations can be specified
3637  /// here to make memory allocation more efficient. This constructor can also
3638  /// autoinsert before another instruction.
3639  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3640 
3641  /// Create a new indirectbr instruction, specifying an
3642  /// Address to jump to. The number of expected destinations can be specified
3643  /// here to make memory allocation more efficient. This constructor also
3644  /// autoinserts at the end of the specified BasicBlock.
3645  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3646 
3647  // allocate space for exactly zero operands
3648  void *operator new(size_t S) { return User::operator new(S); }
3649 
3650  void init(Value *Address, unsigned NumDests);
3651  void growOperands();
3652 
3653 protected:
3654  // Note: Instruction needs to be a friend here to call cloneImpl.
3655  friend class Instruction;
3656 
3657  IndirectBrInst *cloneImpl() const;
3658 
3659 public:
3660  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3661 
3662  /// Iterator type that casts an operand to a basic block.
3663  ///
3664  /// This only makes sense because the successors are stored as adjacent
3665  /// operands for indirectbr instructions.
3667  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3668  std::random_access_iterator_tag, BasicBlock *,
3669  ptrdiff_t, BasicBlock *, BasicBlock *> {
3671 
3672  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3673  BasicBlock *operator->() const { return operator*(); }
3674  };
3675 
3676  /// The const version of `succ_op_iterator`.
3678  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3679  std::random_access_iterator_tag,
3680  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3681  const BasicBlock *> {
3682  explicit const_succ_op_iterator(const_value_op_iterator I)
3683  : iterator_adaptor_base(I) {}
3684 
3685  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3686  const BasicBlock *operator->() const { return operator*(); }
3687  };
3688 
3689  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3690  Instruction *InsertBefore = nullptr) {
3691  return new IndirectBrInst(Address, NumDests, InsertBefore);
3692  }
3693 
3694  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3695  BasicBlock *InsertAtEnd) {
3696  return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3697  }
3698 
3699  /// Provide fast operand accessors.
3701 
3702  // Accessor Methods for IndirectBrInst instruction.
3703  Value *getAddress() { return getOperand(0); }
3704  const Value *getAddress() const { return getOperand(0); }
3705  void setAddress(Value *V) { setOperand(0, V); }
3706 
3707  /// return the number of possible destinations in this
3708  /// indirectbr instruction.
3709  unsigned getNumDestinations() const { return getNumOperands()-1; }
3710 
3711  /// Return the specified destination.
3712  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3713  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3714 
3715  /// Add a destination.
3716  ///
3717  void addDestination(BasicBlock *Dest);
3718 
3719  /// This method removes the specified successor from the
3720  /// indirectbr instruction.
3721  void removeDestination(unsigned i);
3722 
3723  unsigned getNumSuccessors() const { return getNumOperands()-1; }
3724  BasicBlock *getSuccessor(unsigned i) const {
3725  return cast<BasicBlock>(getOperand(i+1));
3726  }
3727  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3728  setOperand(i + 1, NewSucc);
3729  }
3730 
3732  return make_range(succ_op_iterator(std::next(value_op_begin())),
3733  succ_op_iterator(value_op_end()));
3734  }
3735 
3737  return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3738  const_succ_op_iterator(value_op_end()));
3739  }
3740 
3741  // Methods for support type inquiry through isa, cast, and dyn_cast:
3742  static bool classof(const Instruction *I) {
3743  return I->getOpcode() == Instruction::IndirectBr;
3744  }
3745  static bool classof(const Value *V) {
3746  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3747  }
3748 };
3749 
3750 template <>
3752 };
3753 
3755 
3756 //===----------------------------------------------------------------------===//
3757 // InvokeInst Class
3758 //===----------------------------------------------------------------------===//
3759 
3760 /// Invoke instruction. The SubclassData field is used to hold the
3761 /// calling convention of the call.
3762 ///
3763 class InvokeInst : public CallBase {
3764  /// The number of operands for this call beyond the called function,
3765  /// arguments, and operand bundles.
3766  static constexpr int NumExtraOperands = 2;
3767 
3768  /// The index from the end of the operand array to the normal destination.
3769  static constexpr int NormalDestOpEndIdx = -3;
3770 
3771  /// The index from the end of the operand array to the unwind destination.
3772  static constexpr int UnwindDestOpEndIdx = -2;
3773 
3774  InvokeInst(const InvokeInst &BI);
3775 
3776  /// Construct an InvokeInst given a range of arguments.
3777  ///
3778  /// Construct an InvokeInst from a range of arguments
3779  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3780  BasicBlock *IfException, ArrayRef<Value *> Args,
3781  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3782  const Twine &NameStr, Instruction *InsertBefore);
3783 
3784  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3785  BasicBlock *IfException, ArrayRef<Value *> Args,
3786  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3787  const Twine &NameStr, BasicBlock *InsertAtEnd);
3788 
3789  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3790  BasicBlock *IfException, ArrayRef<Value *> Args,
3791  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3792 
3793  /// Compute the number of operands to allocate.
3794  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3795  // We need one operand for the called function, plus our extra operands and
3796  // the input operand counts provided.
3797  return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3798  }
3799 
3800 protected:
3801  // Note: Instruction needs to be a friend here to call cloneImpl.
3802  friend class Instruction;
3803 
3804  InvokeInst *cloneImpl() const;
3805 
3806 public:
3807  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3808  BasicBlock *IfException, ArrayRef<Value *> Args,
3809  const Twine &NameStr,
3810  Instruction *InsertBefore = nullptr) {
3811  int NumOperands = ComputeNumOperands(Args.size());
3812  return new (NumOperands)
3813  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3814  NameStr, InsertBefore);
3815  }
3816 
3817  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3818  BasicBlock *IfException, ArrayRef<Value *> Args,
3819  ArrayRef<OperandBundleDef> Bundles = None,
3820  const Twine &NameStr = "",
3821  Instruction *InsertBefore = nullptr) {
3822  int NumOperands =
3823  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3824  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3825 
3826  return new (NumOperands, DescriptorBytes)
3827  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3828  NameStr, InsertBefore);
3829  }
3830 
3831  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3832  BasicBlock *IfException, ArrayRef<Value *> Args,
3833  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3834  int NumOperands = ComputeNumOperands(Args.size());
3835  return new (NumOperands)
3836  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3837  NameStr, InsertAtEnd);
3838  }
3839 
3840  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3841  BasicBlock *IfException, ArrayRef<Value *> Args,
3843  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3844  int NumOperands =
3845  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3846  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3847 
3848  return new (NumOperands, DescriptorBytes)
3849  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3850  NameStr, InsertAtEnd);
3851  }
3852 
3853  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3854  BasicBlock *IfException, ArrayRef<Value *> Args,
3855  const Twine &NameStr,
3856  Instruction *InsertBefore = nullptr) {
3857  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3858  IfException, Args, None, NameStr, InsertBefore);
3859  }
3860 
3861  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3862  BasicBlock *IfException, ArrayRef<Value *> Args,
3863  ArrayRef<OperandBundleDef> Bundles = None,
3864  const Twine &NameStr = "",
3865  Instruction *InsertBefore = nullptr) {
3866  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3867  IfException, Args, Bundles, NameStr, InsertBefore);
3868  }
3869 
3870  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3871  BasicBlock *IfException, ArrayRef<Value *> Args,
3872  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3873  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3874  IfException, Args, NameStr, InsertAtEnd);
3875  }
3876 
3877  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3878  BasicBlock *IfException, ArrayRef<Value *> Args,
3880  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3881  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3882  IfException, Args, Bundles, NameStr, InsertAtEnd);
3883  }
3884 
3885  /// Create a clone of \p II with a different set of operand bundles and
3886  /// insert it before \p InsertPt.
3887  ///
3888  /// The returned invoke instruction is identical to \p II in every way except
3889  /// that the operand bundles for the new instruction are set to the operand
3890  /// bundles in \p Bundles.
3891  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3892  Instruction *InsertPt = nullptr);
3893 
3894  // get*Dest - Return the destination basic blocks...
3896  return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3897  }
3899  return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3900  }
3902  Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3903  }
3905  Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3906  }
3907 
3908  /// Get the landingpad instruction from the landing pad
3909  /// block (the unwind destination).
3910  LandingPadInst *getLandingPadInst() const;
3911 
3912  BasicBlock *getSuccessor(unsigned i) const {
3913  assert(i < 2 && "Successor # out of range for invoke!");
3914  return i == 0 ? getNormalDest() : getUnwindDest();
3915  }
3916 
3917  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3918  assert(i < 2 && "Successor # out of range for invoke!");
3919  if (i == 0)
3920  setNormalDest(NewSucc);
3921  else
3922  setUnwindDest(NewSucc);
3923  }
3924 
3925  unsigned getNumSuccessors() const { return 2; }
3926 
3927  // Methods for support type inquiry through isa, cast, and dyn_cast:
3928  static bool classof(const Instruction *I) {
3929  return (I->getOpcode() == Instruction::Invoke);
3930  }
3931  static bool classof(const Value *V) {
3932  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3933  }
3934 
3935 private:
3936  // Shadow Instruction::setInstructionSubclassData with a private forwarding
3937  // method so that subclasses cannot accidentally use it.
3938  template <typename Bitfield>
3939  void setSubclassData(typename Bitfield::Type Value) {
3940  Instruction::setSubclassData<Bitfield>(Value);
3941  }
3942 };
3943 
3944 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3945  BasicBlock *IfException, ArrayRef<Value *> Args,
3946  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3947  const Twine &NameStr, Instruction *InsertBefore)
3948  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3949  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3950  InsertBefore) {
3951  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3952 }
3953 
3954 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3955  BasicBlock *IfException, ArrayRef<Value *> Args,
3956  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3957  const Twine &NameStr, BasicBlock *InsertAtEnd)
3958  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3959  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3960  InsertAtEnd) {
3961  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3962 }
3963 
3964 //===----------------------------------------------------------------------===//
3965 // CallBrInst Class
3966 //===----------------------------------------------------------------------===//
3967 
3968 /// CallBr instruction, tracking function calls that may not return control but
3969 /// instead transfer it to a third location. The SubclassData field is used to
3970 /// hold the calling convention of the call.
3971 ///
3972 class CallBrInst : public CallBase {
3973 
3974  unsigned NumIndirectDests;
3975 
3976  CallBrInst(const CallBrInst &BI);
3977 
3978  /// Construct a CallBrInst given a range of arguments.
3979  ///
3980  /// Construct a CallBrInst from a range of arguments
3981  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3982  ArrayRef<BasicBlock *> IndirectDests,
3984  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3985  const Twine &NameStr, Instruction *InsertBefore);
3986 
3987  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3988  ArrayRef<BasicBlock *> IndirectDests,
3990  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3991  const Twine &NameStr, BasicBlock *InsertAtEnd);
3992 
3993  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3995  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3996 
3997  /// Should the Indirect Destinations change, scan + update the Arg list.
3998  void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3999 
4000  /// Compute the number of operands to allocate.
4001  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4002  int NumBundleInputs = 0) {
4003  // We need one operand for the called function, plus our extra operands and
4004  // the input operand counts provided.
4005  return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4006  }
4007 
4008 protected:
4009  // Note: Instruction needs to be a friend here to call cloneImpl.
4010  friend class Instruction;
4011 
4012  CallBrInst *cloneImpl() const;
4013 
4014 public:
4015  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4016  BasicBlock *DefaultDest,
4017  ArrayRef<BasicBlock *> IndirectDests,
4018  ArrayRef<Value *> Args, const Twine &NameStr,
4019  Instruction *InsertBefore = nullptr) {
4020  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4021  return new (NumOperands)
4022  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4023  NumOperands, NameStr, InsertBefore);
4024  }
4025 
4026  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4027  BasicBlock *DefaultDest,
4028  ArrayRef<BasicBlock *> IndirectDests,
4030  ArrayRef<OperandBundleDef> Bundles = None,
4031  const Twine &NameStr = "",
4032  Instruction *InsertBefore = nullptr) {
4033  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4034  CountBundleInputs(Bundles));
4035  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4036 
4037  return new (NumOperands, DescriptorBytes)
4038  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4039  NumOperands, NameStr, InsertBefore);
4040  }
4041 
4042  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4043  BasicBlock *DefaultDest,
4044  ArrayRef<BasicBlock *> IndirectDests,
4045  ArrayRef<Value *> Args, const Twine &NameStr,
4046  BasicBlock *InsertAtEnd) {
4047  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4048  return new (NumOperands)
4049  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4050  NumOperands, NameStr, InsertAtEnd);
4051  }
4052 
4053  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4054  BasicBlock *DefaultDest,
4055  ArrayRef<BasicBlock *> IndirectDests,
4058  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4059  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4060  CountBundleInputs(Bundles));
4061  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4062 
4063  return new (NumOperands, DescriptorBytes)
4064  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4065  NumOperands, NameStr, InsertAtEnd);
4066  }
4067 
4068  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4069  ArrayRef<BasicBlock *> IndirectDests,
4070  ArrayRef<Value *> Args, const Twine &NameStr,
4071  Instruction *InsertBefore = nullptr) {
4072  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4073  IndirectDests, Args, NameStr, InsertBefore);
4074  }
4075 
4076  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4077  ArrayRef<BasicBlock *> IndirectDests,
4079  ArrayRef<OperandBundleDef> Bundles = None,
4080  const Twine &NameStr = "",
4081  Instruction *InsertBefore = nullptr) {
4082  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4083  IndirectDests, Args, Bundles, NameStr, InsertBefore);
4084  }
4085 
4086  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4087  ArrayRef<BasicBlock *> IndirectDests,
4088  ArrayRef<Value *> Args, const Twine &NameStr,
4089  BasicBlock *InsertAtEnd) {
4090  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4091  IndirectDests, Args, NameStr, InsertAtEnd);
4092  }
4093 
4095  BasicBlock *DefaultDest,
4096  ArrayRef<BasicBlock *> IndirectDests,
4099  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4100  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4101  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4102  }
4103 
4104  /// Create a clone of \p CBI with a different set of operand bundles and
4105  /// insert it before \p InsertPt.
4106  ///
4107  /// The returned callbr instruction is identical to \p CBI in every way
4108  /// except that the operand bundles for the new instruction are set to the
4109  /// operand bundles in \p Bundles.
4110  static CallBrInst *Create(CallBrInst *CBI,
4112  Instruction *InsertPt = nullptr);
4113 
4114  /// Return the number of callbr indirect dest labels.
4115  ///
4116  unsigned getNumIndirectDests() const { return NumIndirectDests; }
4117 
4118  /// getIndirectDestLabel - Return the i-th indirect dest label.
4119  ///
4120  Value *getIndirectDestLabel(unsigned i) const {
4121  assert(i < getNumIndirectDests() && "Out of bounds!");
4122  return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4123  }
4124 
4125  Value *getIndirectDestLabelUse(unsigned i) const {
4126  assert(i < getNumIndirectDests() && "Out of bounds!");
4127  return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4128  }
4129 
4130  // Return the destination basic blocks...
4132  return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4133  }
4134  BasicBlock *getIndirectDest(unsigned i) const {
4135  return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4136  }
4138  SmallVector<BasicBlock *, 16> IndirectDests;
4139  for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4140  IndirectDests.push_back(getIndirectDest(i));
4141  return IndirectDests;
4142  }
4144  *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4145  }
4146  void setIndirectDest(unsigned i, BasicBlock *B) {
4147  updateArgBlockAddresses(i, B);
4148  *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4149  }
4150 
4151  BasicBlock *getSuccessor(unsigned i) const {
4152  assert(i < getNumSuccessors() + 1 &&
4153  "Successor # out of range for callbr!");
4154  return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4155  }
4156 
4157  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4158  assert(i < getNumIndirectDests() + 1 &&
4159  "Successor # out of range for callbr!");
4160  return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4161  }
4162 
4163  unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4164 
4165  // Methods for support type inquiry through isa, cast, and dyn_cast:
4166  static bool classof(const Instruction *I) {
4167  return (I->getOpcode() == Instruction::CallBr);
4168  }
4169  static bool classof(const Value *V) {
4170  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4171  }
4172 
4173 private:
4174  // Shadow Instruction::setInstructionSubclassData with a private forwarding
4175  // method so that subclasses cannot accidentally use it.
4176  template <typename Bitfield>
4177  void setSubclassData(typename Bitfield::Type Value) {
4178  Instruction::setSubclassData<Bitfield>(Value);
4179  }
4180 };
4181 
4182 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4183  ArrayRef<BasicBlock *> IndirectDests,
4184  ArrayRef<Value *> Args,
4185  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4186  const Twine &NameStr, Instruction *InsertBefore)
4187  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4188  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4189  InsertBefore) {
4190  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4191 }
4192 
4193 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4194  ArrayRef<BasicBlock *> IndirectDests,
4195  ArrayRef<Value *> Args,
4196  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4197  const Twine &NameStr, BasicBlock *InsertAtEnd)
4198  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4199  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4200  InsertAtEnd) {
4201  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4202 }
4203 
4204 //===----------------------------------------------------------------------===//
4205 // ResumeInst Class
4206 //===----------------------------------------------------------------------===//
4207 
4208 //===---------------------------------------------------------------------------
4209 /// Resume the propagation of an exception.
4210 ///
4211 class ResumeInst : public Instruction {
4212  ResumeInst(const ResumeInst &RI);
4213 
4214  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4215  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4216 
4217 protected:
4218  // Note: Instruction needs to be a friend here to call cloneImpl.
4219  friend class Instruction;
4220 
4221  ResumeInst *cloneImpl() const;
4222 
4223 public:
4224  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4225  return new(1) ResumeInst(Exn, InsertBefore);
4226  }
4227 
4228  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4229  return new(1) ResumeInst(Exn, InsertAtEnd);
4230  }
4231 
4232  /// Provide fast operand accessors
4234 
4235  /// Convenience accessor.
4236  Value *getValue() const { return Op<0>(); }
4237 
4238  unsigned getNumSuccessors() const { return 0; }
4239 
4240  // Methods for support type inquiry through isa, cast, and dyn_cast:
4241  static bool classof(const Instruction *I) {
4242  return I->getOpcode() == Instruction::Resume;
4243  }
4244  static bool classof(const Value *V) {
4245  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4246  }
4247 
4248 private:
4249  BasicBlock *getSuccessor(unsigned idx) const {
4250  llvm_unreachable("ResumeInst has no successors!");
4251  }
4252 
4253  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4254  llvm_unreachable("ResumeInst has no successors!");
4255  }
4256 };
4257 
4258 template <>
4260  public FixedNumOperandTraits<ResumeInst, 1> {
4261 };
4262 
4264 
4265 //===----------------------------------------------------------------------===//
4266 // CatchSwitchInst Class
4267 //===----------------------------------------------------------------------===//
4269  using UnwindDestField = BoolBitfieldElementT<0>;
4270 
4271  /// The number of operands actually allocated. NumOperands is
4272  /// the number actually in use.
4273  unsigned ReservedSpace;
4274 
4275  // Operand[0] = Outer scope
4276  // Operand[1] = Unwind block destination
4277  // Operand[n] = BasicBlock to go to on match
4278  CatchSwitchInst(const CatchSwitchInst &CSI);
4279 
4280  /// Create a new switch instruction, specifying a
4281  /// default destination. The number of additional handlers can be specified
4282  /// here to make memory allocation more efficient.
4283  /// This constructor can also autoinsert before another instruction.
4284  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4285  unsigned NumHandlers, const Twine &NameStr,
4286  Instruction *InsertBefore);
4287 
4288  /// Create a new switch instruction, specifying a
4289  /// default destination. The number of additional handlers can be specified
4290  /// here to make memory allocation more efficient.
4291  /// This constructor also autoinserts at the end of the specified BasicBlock.
4292  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4293  unsigned NumHandlers, const Twine &NameStr,
4294  BasicBlock *InsertAtEnd);
4295 
4296  // allocate space for exactly zero operands
4297  void *operator new(size_t S) { return User::operator new(S); }
4298 
4299  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4300  void growOperands(unsigned Size);
4301 
4302 protected:
4303  // Note: Instruction needs to be a friend here to call cloneImpl.
4304  friend class Instruction;
4305 
4306  CatchSwitchInst *cloneImpl() const;
4307 
4308 public:
4309  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4310 
4311  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4312  unsigned NumHandlers,
4313  const Twine &NameStr = "",
4314  Instruction *InsertBefore = nullptr) {
4315  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4316  InsertBefore);
4317  }
4318 
4319  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4320  unsigned NumHandlers, const Twine &NameStr,
4321  BasicBlock *InsertAtEnd) {
4322  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4323  InsertAtEnd);
4324  }
4325 
4326  /// Provide fast operand accessors
4328 
4329  // Accessor Methods for CatchSwitch stmt
4330  Value *getParentPad() const { return getOperand(0); }
4331  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4332 
4333  // Accessor Methods for CatchSwitch stmt
4334  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4335  bool unwindsToCaller() const { return !hasUnwindDest(); }
4337  if (hasUnwindDest())
4338  return cast<BasicBlock>(getOperand(1));
4339  return nullptr;
4340  }
4341  void setUnwindDest(BasicBlock *UnwindDest) {
4342  assert(UnwindDest);
4343  assert(hasUnwindDest());
4344  setOperand(1, UnwindDest);
4345  }
4346 
4347  /// return the number of 'handlers' in this catchswitch
4348  /// instruction, except the default handler
4349  unsigned getNumHandlers() const {
4350  if (hasUnwindDest())
4351  return getNumOperands() - 2;
4352  return getNumOperands() - 1;
4353  }
4354 
4355 private:
4356  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4357  static const BasicBlock *handler_helper(const Value *V) {
4358  return cast<BasicBlock>(V);
4359  }
4360 
4361 public:
4362  using DerefFnTy = BasicBlock *(*)(Value *);
4365  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4366  using const_handler_iterator =
4369 
4370  /// Returns an iterator that points to the first handler in CatchSwitchInst.
4372  op_iterator It = op_begin() + 1;
4373  if (hasUnwindDest())
4374  ++It;
4375  return handler_iterator(It, DerefFnTy(handler_helper));
4376  }
4377 
4378  /// Returns an iterator that points to the first handler in the
4379  /// CatchSwitchInst.
4381  const_op_iterator It = op_begin() + 1;
4382  if (hasUnwindDest())
4383  ++It;
4384  return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4385  }
4386 
4387  /// Returns a read-only iterator that points one past the last
4388  /// handler in the CatchSwitchInst.
4390  return handler_iterator(op_end(), DerefFnTy(handler_helper));
4391  }
4392 
4393  /// Returns an iterator that points one past the last handler in the
4394  /// CatchSwitchInst.
4396  return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4397  }
4398 
4399  /// iteration adapter for range-for loops.
4401  return make_range(handler_begin(), handler_end());
4402  }
4403 
4404  /// iteration adapter for range-for loops.
4406  return make_range(handler_begin(), handler_end());
4407  }
4408 
4409  /// Add an entry to the switch instruction...
4410  /// Note:
4411  /// This action invalidates handler_end(). Old handler_end() iterator will
4412  /// point to the added handler.
4413  void addHandler(BasicBlock *Dest);
4414 
4415  void removeHandler(handler_iterator HI);
4416 
4417  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4418  BasicBlock *getSuccessor(unsigned Idx) const {
4419  assert(Idx < getNumSuccessors() &&
4420  "Successor # out of range for catchswitch!");
4421  return cast<BasicBlock>(getOperand(Idx + 1));
4422  }
4423  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4424  assert(Idx < getNumSuccessors() &&
4425  "Successor # out of range for catchswitch!");
4426  setOperand(Idx + 1, NewSucc);
4427  }
4428 
4429  // Methods for support type inquiry through isa, cast, and dyn_cast:
4430  static bool classof(const Instruction *I) {
4431  return I->getOpcode() == Instruction::CatchSwitch;
4432  }
4433  static bool classof(const Value *V) {
4434  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4435  }
4436 };
4437 
4438 template <>
4440 
4442 
4443 //===----------------------------------------------------------------------===//
4444 // CleanupPadInst Class
4445 //===----------------------------------------------------------------------===//
4447 private:
4448  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4449  unsigned Values, const Twine &NameStr,
4450  Instruction *InsertBefore)
4451  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4452  NameStr, InsertBefore) {}
4453  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4454  unsigned Values, const Twine &NameStr,
4455  BasicBlock *InsertAtEnd)
4456  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4457  NameStr, InsertAtEnd) {}
4458 
4459 public:
4461  const Twine &NameStr = "",
4462  Instruction *InsertBefore = nullptr) {
4463  unsigned Values = 1 + Args.size();
4464  return new (Values)
4465  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4466  }
4467 
4469  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4470  unsigned Values = 1 + Args.size();
4471  return new (Values)
4472  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4473  }
4474 
4475  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4476  static bool classof(const Instruction *I) {
4477  return I->getOpcode() == Instruction::CleanupPad;
4478  }
4479  static bool classof(const Value *V) {
4480  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4481  }
4482 };
4483 
4484 //===----------------------------------------------------------------------===//
4485 // CatchPadInst Class
4486 //===----------------------------------------------------------------------===//
4488 private:
4489  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4490  unsigned Values, const Twine &NameStr,
4491  Instruction *InsertBefore)
4492  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4493  NameStr, InsertBefore) {}
4494  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4495  unsigned Values, const Twine &NameStr,
4496  BasicBlock *InsertAtEnd)
4497  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4498  NameStr, InsertAtEnd) {}
4499 
4500 public:
4502  const Twine &NameStr = "",
4503  Instruction *InsertBefore = nullptr) {
4504  unsigned Values = 1 + Args.size();
4505  return new (Values)
4506  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4507  }
4508 
4510  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4511  unsigned Values = 1 + Args.size();
4512  return new (Values)
4513  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4514  }
4515 
4516  /// Convenience accessors
4518  return cast<CatchSwitchInst>(Op<-1>());
4519  }
4520  void setCatchSwitch(Value *CatchSwitch) {
4521  assert(CatchSwitch);
4522  Op<-1>() = CatchSwitch;
4523  }
4524 
4525  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4526  static bool classof(const Instruction *I) {
4527  return I->getOpcode() == Instruction::CatchPad;
4528  }
4529  static bool classof(const Value *V) {
4530  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4531  }
4532 };
4533 
4534 //===----------------------------------------------------------------------===//
4535 // CatchReturnInst Class
4536 //===----------------------------------------------------------------------===//
4537 
4539  CatchReturnInst(const CatchReturnInst &RI);
4540  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4541  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4542 
4543  void init(Value *CatchPad, BasicBlock *BB);
4544 
4545 protected:
4546  // Note: Instruction needs to be a friend here to call cloneImpl.
4547  friend class Instruction;
4548 
4549  CatchReturnInst *cloneImpl() const;
4550 
4551 public:
4553  Instruction *InsertBefore = nullptr) {
4554  assert(CatchPad);
4555  assert(BB);
4556  return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4557  }
4558 
4560  BasicBlock *InsertAtEnd) {
4561  assert(CatchPad);
4562  assert(BB);
4563  return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4564  }
4565 
4566  /// Provide fast operand accessors
4568 
4569  /// Convenience accessors.
4570  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4571  void setCatchPad(CatchPadInst *CatchPad) {
4572  assert(CatchPad);
4573  Op<0>() = CatchPad;
4574  }
4575 
4576  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4577  void setSuccessor(BasicBlock *NewSucc) {
4578  assert(NewSucc);
4579  Op<1>() = NewSucc;
4580  }
4581  unsigned getNumSuccessors() const { return 1; }
4582 
4583  /// Get the parentPad of this catchret's catchpad's catchswitch.
4584  /// The successor block is implicitly a member of this funclet.
4586  return getCatchPad()->getCatchSwitch()->getParentPad();
4587  }
4588 
4589  // Methods for support type inquiry through isa, cast, and dyn_cast:
4590  static bool classof(const Instruction *I) {
4591  return (I->getOpcode() == Instruction::CatchRet);
4592  }
4593  static bool classof(const Value *V) {
4594  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4595  }
4596 
4597 private:
4598  BasicBlock *getSuccessor(unsigned Idx) const {
4599  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4600  return getSuccessor();
4601  }
4602 
4603  void setSuccessor(unsigned Idx, BasicBlock *B) {
4604  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4605  setSuccessor(B);
4606  }
4607 };
4608 
4609 template <>
4611  : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4612 
4614 
4615 //===----------------------------------------------------------------------===//
4616 // CleanupReturnInst Class
4617 //===----------------------------------------------------------------------===//
4618 
4620  using UnwindDestField = BoolBitfieldElementT<0>;
4621 
4622 private:
4624  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4625  Instruction *InsertBefore = nullptr);
4626  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4627  BasicBlock *InsertAtEnd);
4628 
4629  void init(Value *CleanupPad, BasicBlock *UnwindBB);
4630